.+)$")
+
+ def __init__(self, parser):
+ self._parser = parser
+ self._data = {}
+ for getter in dir(self._parser):
+ m = self.GETTERCRE.match(getter)
+ if not m or not callable(getattr(self._parser, getter)):
+ continue
+ self._data[m.group('name')] = None # See class docstring.
+
+ def __getitem__(self, key):
+ return self._data[key]
+
+ def __setitem__(self, key, value):
+ try:
+ k = 'get' + key
+ except TypeError:
+ raise ValueError('Incompatible key: {} (type: {})'
+ ''.format(key, type(key)))
+ if k == 'get':
+ raise ValueError('Incompatible key: cannot use "" as a name')
+ self._data[key] = value
+ func = functools.partial(self._parser._get_conv, conv=value)
+ func.converter = value
+ setattr(self._parser, k, func)
+ for proxy in self._parser.values():
+ getter = functools.partial(proxy.get, _impl=func)
+ setattr(proxy, k, getter)
+
+ def __delitem__(self, key):
+ try:
+ k = 'get' + (key or None)
+ except TypeError:
+ raise KeyError(key)
+ del self._data[key]
+ for inst in itertools.chain((self._parser,), self._parser.values()):
+ try:
+ delattr(inst, k)
+ except AttributeError:
+ # don't raise since the entry was present in _data, silently
+ # clean up
+ continue
+
+ def __iter__(self):
+ return iter(self._data)
+
+ def __len__(self):
+ return len(self._data)
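The `__setitem__` above is what turns a registered converter into a dynamic `get<name>()` method on the parser and on every section proxy. A minimal usage sketch against the stdlib `configparser` API, which this backport mirrors (the `list` and `upper` converter names are made up for illustration):

    from configparser import ConfigParser

    parser = ConfigParser(converters={'list': lambda v: v.split(',')})
    parser.read_string("[server]\nhosts = alpha,beta,gamma\n")

    # Registering 'list' synthesized getlist() on the parser and proxies.
    print(parser.getlist('server', 'hosts'))   # ['alpha', 'beta', 'gamma']
    print(parser['server'].getlist('hosts'))   # ['alpha', 'beta', 'gamma']

    # Assigning through the converters mapping attaches a getter at run time.
    parser.converters['upper'] = str.upper
    print(parser.getupper('server', 'hosts'))  # 'ALPHA,BETA,GAMMA'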
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/backports/configparser/helpers.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/backports/configparser/helpers.py
new file mode 100755
index 00000000..2d743fba
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/backports/configparser/helpers.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import abc
+import os
+
+try:
+ from collections.abc import MutableMapping
+except ImportError:
+ from collections import MutableMapping
+
+try:
+ from collections import UserDict
+except ImportError:
+ from UserDict import UserDict
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from ordereddict import OrderedDict
+
+try:
+ import pathlib
+except ImportError:
+ pathlib = None
+
+from io import open
+import sys
+try:
+ from thread import get_ident
+except ImportError:
+ try:
+ from _thread import get_ident
+ except ImportError:
+ from _dummy_thread import get_ident
+
+
+__all__ = ['UserDict', 'OrderedDict', 'open']
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+native_str = str
+str = type('str')
+
+
+def from_none(exc):
+ """raise from_none(ValueError('a')) == raise ValueError('a') from None"""
+ exc.__cause__ = None
+ exc.__suppress_context__ = True
+ return exc
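A usage sketch: wrapping an exception in `from_none()` before raising suppresses the implicit chaining context, emulating Python 3's `raise ... from None` on both interpreters (`parse_port` is a made-up example):

    def parse_port(value):
        try:
            return int(value)
        except ValueError:
            # traceback shows only this ValueError, not the original one
            raise from_none(ValueError('invalid port: %r' % (value,)))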
+
+
+# from reprlib 3.2.1
+def recursive_repr(fillvalue='...'):
+ 'Decorator to make a repr function return fillvalue for a recursive call'
+
+ def decorating_function(user_function):
+ repr_running = set()
+
+ def wrapper(self):
+ key = id(self), get_ident()
+ if key in repr_running:
+ return fillvalue
+ repr_running.add(key)
+ try:
+ result = user_function(self)
+ finally:
+ repr_running.discard(key)
+ return result
+
+ # Can't use functools.wraps() here because of bootstrap issues
+ wrapper.__module__ = getattr(user_function, '__module__')
+ wrapper.__doc__ = getattr(user_function, '__doc__')
+ wrapper.__name__ = getattr(user_function, '__name__')
+ wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
+ return wrapper
+
+ return decorating_function
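A usage sketch: applied to the `__repr__` of a self-referential container, the decorator returns the fill value instead of recursing forever (`Box` is a made-up example):

    class Box(list):
        @recursive_repr()
        def __repr__(self):
            return 'Box(%s)' % ', '.join(map(repr, self))

    b = Box([1, 2])
    b.append(b)     # the box now contains itself
    print(repr(b))  # Box(1, 2, ...)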
+
+
+# from collections 3.2.1
+class _ChainMap(MutableMapping):
+ ''' A ChainMap groups multiple dicts (or other mappings) together
+ to create a single, updateable view.
+
+    The underlying mappings are stored in a list. That list is public and can be
+    accessed or updated using the *maps* attribute. There is no other state.
+
+ Lookups search the underlying mappings successively until a key is found.
+ In contrast, writes, updates, and deletions only operate on the first
+ mapping.
+
+ '''
+
+ def __init__(self, *maps):
+ '''Initialize a ChainMap by setting *maps* to the given mappings.
+ If no mappings are provided, a single empty dictionary is used.
+
+ '''
+ self.maps = list(maps) or [{}] # always at least one map
+
+ def __missing__(self, key):
+ raise KeyError(key)
+
+ def __getitem__(self, key):
+ for mapping in self.maps:
+ try:
+ # can't use 'key in mapping' with defaultdict
+ return mapping[key]
+ except KeyError:
+ pass
+ # support subclasses that define __missing__
+ return self.__missing__(key)
+
+ def get(self, key, default=None):
+ return self[key] if key in self else default
+
+ def __len__(self):
+ # reuses stored hash values if possible
+ return len(set().union(*self.maps))
+
+ def __iter__(self):
+ return iter(set().union(*self.maps))
+
+ def __contains__(self, key):
+ return any(key in m for m in self.maps)
+
+ @recursive_repr()
+ def __repr__(self):
+ return '{0.__class__.__name__}({1})'.format(
+ self, ', '.join(map(repr, self.maps)))
+
+ @classmethod
+ def fromkeys(cls, iterable, *args):
+ 'Create a ChainMap with a single dict created from the iterable.'
+ return cls(dict.fromkeys(iterable, *args))
+
+ def copy(self):
+ """
+ New ChainMap or subclass with a new copy of
+ maps[0] and refs to maps[1:]
+ """
+ return self.__class__(self.maps[0].copy(), *self.maps[1:])
+
+ __copy__ = copy
+
+ def new_child(self): # like Django's Context.push()
+ 'New ChainMap with a new dict followed by all previous maps.'
+ return self.__class__({}, *self.maps)
+
+ @property
+ def parents(self): # like Django's Context.pop()
+ 'New ChainMap from maps[1:].'
+ return self.__class__(*self.maps[1:])
+
+ def __setitem__(self, key, value):
+ self.maps[0][key] = value
+
+ def __delitem__(self, key):
+ try:
+ del self.maps[0][key]
+ except KeyError:
+ raise KeyError(
+ 'Key not found in the first mapping: {!r}'.format(key))
+
+ def popitem(self):
+ """
+ Remove and return an item pair from maps[0].
+        Raise KeyError if maps[0] is empty.
+ """
+ try:
+ return self.maps[0].popitem()
+ except KeyError:
+ raise KeyError('No keys found in the first mapping.')
+
+ def pop(self, key, *args):
+ """
+ Remove *key* from maps[0] and return its value.
+ Raise KeyError if *key* not in maps[0].
+ """
+
+ try:
+ return self.maps[0].pop(key, *args)
+ except KeyError:
+ raise KeyError(
+ 'Key not found in the first mapping: {!r}'.format(key))
+
+ def clear(self):
+ 'Clear maps[0], leaving maps[1:] intact.'
+ self.maps[0].clear()
+
+
+try:
+ from collections import ChainMap
+except ImportError:
+ ChainMap = _ChainMap
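For reference, the fallback reproduces the stdlib semantics: lookups fall through the maps in order, while writes and deletions touch only the first map. A small sketch:

    defaults = {'color': 'red', 'user': 'guest'}
    overrides = {'user': 'admin'}
    cm = ChainMap(overrides, defaults)

    print(cm['user'])         # 'admin' -- found in the first map
    print(cm['color'])        # 'red'   -- falls through to defaults

    cm['color'] = 'blue'      # written to overrides only
    print(defaults['color'])  # 'red'   -- defaults untouched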
+
+
+_ABC = getattr(
+ abc, 'ABC',
+ # Python 3.3 compatibility
+ abc.ABCMeta(
+ native_str('__ABC'),
+ (object,),
+ dict(__metaclass__=abc.ABCMeta),
+ ),
+)
+
+
+class _PathLike(_ABC):
+
+ """Abstract base class for implementing the file system path protocol."""
+
+ @abc.abstractmethod
+ def __fspath__(self):
+ """Return the file system path representation of the object."""
+ raise NotImplementedError
+
+ @classmethod
+ def __subclasshook__(cls, subclass):
+ return bool(
+ hasattr(subclass, '__fspath__')
+ # workaround for Python 3.5
+ or pathlib and issubclass(subclass, pathlib.Path)
+ )
+
+
+PathLike = getattr(os, 'PathLike', _PathLike)
+
+
+def _fspath(path):
+ """Return the path representation of a path-like object.
+
+ If str or bytes is passed in, it is returned unchanged. Otherwise the
+ os.PathLike interface is used to get the path representation. If the
+ path representation is not str or bytes, TypeError is raised. If the
+ provided path is not str, bytes, or os.PathLike, TypeError is raised.
+ """
+ if isinstance(path, (str, bytes)):
+ return path
+
+ if not hasattr(path, '__fspath__') and isinstance(path, pathlib.Path):
+ # workaround for Python 3.5
+ return str(path)
+
+ # Work from the object's type to match method resolution of other magic
+ # methods.
+ path_type = type(path)
+ try:
+ path_repr = path_type.__fspath__(path)
+ except AttributeError:
+
+ if hasattr(path_type, '__fspath__'):
+ raise
+ else:
+ raise TypeError("expected str, bytes or os.PathLike object, "
+ "not " + path_type.__name__)
+ if isinstance(path_repr, (str, bytes)):
+ return path_repr
+ else:
+ raise TypeError("expected {}.__fspath__() to return str or bytes, "
+ "not {}".format(path_type.__name__,
+ type(path_repr).__name__))
+
+
+fspath = getattr(os, 'fspath', _fspath)
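Taken together, `PathLike` and `fspath()` backport the PEP 519 file-system-path protocol: any object exposing `__fspath__` is accepted structurally, with no registration needed. A short sketch (`TempName` is a made-up example):

    class TempName(object):
        def __init__(self, name):
            self.name = name

        def __fspath__(self):
            return '/tmp/' + self.name

    print(isinstance(TempName('x'), PathLike))  # True, via __subclasshook__
    print(fspath(TempName('x')))                # '/tmp/x'
    print(fspath(b'/tmp/y'))                    # b'/tmp/y' -- bytes pass through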
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/chardetect b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/chardetect
new file mode 100755
index 00000000..62ee81ee
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/chardetect
@@ -0,0 +1,12 @@
+#!/usr/local/bin/python3.7
+# EASY-INSTALL-ENTRY-SCRIPT: 'chardet==3.0.4','console_scripts','chardetect'
+__requires__ = 'chardet==3.0.4'
+import re
+import sys
+from pkg_resources import load_entry_point
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(
+ load_entry_point('chardet==3.0.4', 'console_scripts', 'chardetect')()
+ )
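Wrappers regenerated in this style resolve the command through the distribution's declared console_scripts entry point at run time instead of importing it directly. Assuming chardet 3.0.4 declares `chardetect = chardet.cli.chardetect:main` (its upstream setup), the script above is roughly equivalent to:

    import sys
    from chardet.cli.chardetect import main

    if __name__ == '__main__':
        sys.exit(main())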
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/futurize b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/futurize
index 392e2fad..c2cbec45 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/futurize
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/futurize
@@ -1,8 +1,12 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
+#!/usr/local/bin/python3.7
+# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.17.1','console_scripts','futurize'
+__requires__ = 'future==0.17.1'
import re
import sys
-from libfuturize.main import main
+from pkg_resources import load_entry_point
+
if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(
+ load_entry_point('future==0.17.1', 'console_scripts', 'futurize')()
+ )
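Note the regex tweak carried through each rewritten wrapper: `-script\.pyw` became `-script\.pyw?`, so the now-optional `w` strips both `-script.py` and `-script.pyw` suffixes (as well as `.exe`) from `sys.argv[0]`. A quick behavior check:

    import re

    pattern = r'(-script\.pyw?|\.exe)?$'
    for name in ('futurize-script.py', 'futurize-script.pyw',
                 'futurize.exe', 'futurize'):
        print(re.sub(pattern, '', name))  # prints 'futurize' every time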
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/json
deleted file mode 100755
index 76697c5c..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/json
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-import re
-import sys
-from jsonspec.cli import main
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/jsonpath.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/jsonpath.py
index 6b674505..7c34053d 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/jsonpath.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/jsonpath.py
@@ -1,8 +1,12 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
+#!/usr/local/bin/python3.7
+# EASY-INSTALL-ENTRY-SCRIPT: 'jsonpath-rw==1.4.0','console_scripts','jsonpath.py'
+__requires__ = 'jsonpath-rw==1.4.0'
import re
import sys
-from jsonpath_rw.bin.jsonpath import entry_point
+from pkg_resources import load_entry_point
+
if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
- sys.exit(entry_point())
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(
+ load_entry_point('jsonpath-rw==1.4.0', 'console_scripts', 'jsonpath.py')()
+ )
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/jsonpath_ng b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/jsonpath_ng
deleted file mode 100755
index 2290ad32..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/jsonpath_ng
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-import re
-import sys
-from jsonpath_ng.bin.jsonpath import entry_point
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
- sys.exit(entry_point())
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/jsonschema b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/jsonschema
index 902c2bae..ceb4e1de 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/jsonschema
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/jsonschema
@@ -1,8 +1,12 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
+#!/usr/local/bin/python3.7
+# EASY-INSTALL-ENTRY-SCRIPT: 'jsonschema==2.6.0','console_scripts','jsonschema'
+__requires__ = 'jsonschema==2.6.0'
import re
import sys
-from jsonschema.cli import main
+from pkg_resources import load_entry_point
+
if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(
+ load_entry_point('jsonschema==2.6.0', 'console_scripts', 'jsonschema')()
+ )
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/mako-render b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/mako-render
index 2b8ef7c3..8d3e76d7 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/mako-render
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/mako-render
@@ -1,8 +1,12 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
+#!/usr/local/bin/python3.7
+# EASY-INSTALL-ENTRY-SCRIPT: 'Mako==1.1.0','console_scripts','mako-render'
+__requires__ = 'Mako==1.1.0'
import re
import sys
-from mako.cmd import cmdline
+from pkg_resources import load_entry_point
+
if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
- sys.exit(cmdline())
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(
+ load_entry_point('Mako==1.1.0', 'console_scripts', 'mako-render')()
+ )
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/normalizer b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/normalizer
deleted file mode 100755
index 937b45c9..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/normalizer
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-import re
-import sys
-from charset_normalizer.cli.normalizer import cli_detect
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
- sys.exit(cli_detect())
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/pasteurize b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/pasteurize
index 6106a533..d1e6ed32 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/pasteurize
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/bin/pasteurize
@@ -1,8 +1,12 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
+#!/usr/local/bin/python3.7
+# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.17.1','console_scripts','pasteurize'
+__requires__ = 'future==0.17.1'
import re
import sys
-from libpasteurize.main import main
+from pkg_resources import load_entry_point
+
if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(
+ load_entry_point('future==0.17.1', 'console_scripts', 'pasteurize')()
+ )
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/LICENSE b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/LICENSE
deleted file mode 100644
index c2fda9a2..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-This package contains a modified version of ca-bundle.crt:
-
-ca-bundle.crt -- Bundle of CA Root Certificates
-
-Certificate data from Mozilla as of: Thu Nov 3 19:04:19 2011#
-This is a bundle of X.509 certificates of public Certificate Authorities
-(CA). These were automatically extracted from Mozilla's root certificates
-file (certdata.txt). This file can be found in the mozilla source tree:
-http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1#
-It contains the certificates in PEM format and therefore
-can be directly used with curl / libcurl / php_curl, or with
-an Apache+mod_ssl webserver for SSL client authentication.
-Just configure this file as the SSLCACertificateFile.#
-
-***** BEGIN LICENSE BLOCK *****
-This Source Code Form is subject to the terms of the Mozilla Public License,
-v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
-one at http://mozilla.org/MPL/2.0/.
-
-***** END LICENSE BLOCK *****
-@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/METADATA
deleted file mode 100644
index 7a6860db..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/METADATA
+++ /dev/null
@@ -1,83 +0,0 @@
-Metadata-Version: 2.1
-Name: certifi
-Version: 2021.10.8
-Summary: Python package for providing Mozilla's CA Bundle.
-Home-page: https://certifiio.readthedocs.io/en/latest/
-Author: Kenneth Reitz
-Author-email: me@kennethreitz.com
-License: MPL-2.0
-Project-URL: Documentation, https://certifiio.readthedocs.io/en/latest/
-Project-URL: Source, https://github.com/certifi/python-certifi
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
-Classifier: Natural Language :: English
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-
-Certifi: Python SSL Certificates
-================================
-
-`Certifi`_ provides Mozilla's carefully curated collection of Root Certificates for
-validating the trustworthiness of SSL certificates while verifying the identity
-of TLS hosts. It has been extracted from the `Requests`_ project.
-
-Installation
-------------
-
-``certifi`` is available on PyPI. Simply install it with ``pip``::
-
- $ pip install certifi
-
-Usage
------
-
-To reference the installed certificate authority (CA) bundle, you can use the
-built-in function::
-
- >>> import certifi
-
- >>> certifi.where()
- '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'
-
-Or from the command line::
-
- $ python -m certifi
- /usr/local/lib/python3.7/site-packages/certifi/cacert.pem
-
-Enjoy!
-
-1024-bit Root Certificates
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Browsers and certificate authorities have concluded that 1024-bit keys are
-unacceptably weak for certificates, particularly root certificates. For this
-reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
-bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
-certificate from the same CA. Because Mozilla removed these certificates from
-its bundle, ``certifi`` removed them as well.
-
-In previous versions, ``certifi`` provided the ``certifi.old_where()`` function
-to intentionally re-add the 1024-bit roots back into your bundle. This was not
-recommended in production and therefore was removed at the end of 2018.
-
-.. _`Certifi`: https://certifiio.readthedocs.io/en/latest/
-.. _`Requests`: https://requests.readthedocs.io/en/master/
-
-Addition/Removal of Certificates
---------------------------------
-
-Certifi does not support any addition/removal or other modification of the
-CA trust store content. This project is intended to provide a reliable and
-highly portable root of trust to python deployments. Look to upstream projects
-for methods to use alternate trust.
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/RECORD
deleted file mode 100644
index 0c541d4b..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/RECORD
+++ /dev/null
@@ -1,10 +0,0 @@
-certifi-2021.10.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-certifi-2021.10.8.dist-info/LICENSE,sha256=vp2C82ES-Hp_HXTs1Ih-FGe7roh4qEAEoAEXseR1o-I,1049
-certifi-2021.10.8.dist-info/METADATA,sha256=iB_zbT1uX_8_NC7iGv0YEB-9b3idhQwHrFTSq8R1kD8,2994
-certifi-2021.10.8.dist-info/RECORD,,
-certifi-2021.10.8.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110
-certifi-2021.10.8.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
-certifi/__init__.py,sha256=xWdRgntT3j1V95zkRipGOg_A1UfEju2FcpujhysZLRI,62
-certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
-certifi/cacert.pem,sha256=-og4Keu4zSpgL5shwfhd4kz0eUnVILzrGCi0zRy2kGw,265969
-certifi/core.py,sha256=V0uyxKOYdz6ulDSusclrLmjbPgOXsD0BnEf0SQ7OnoE,2303
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/WHEEL
deleted file mode 100644
index 6d38aa06..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.35.1)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/top_level.txt
deleted file mode 100644
index 963eac53..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi-2021.10.8.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-certifi
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/__init__.py
old mode 100644
new mode 100755
index 8db1a0e5..0d59a056
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/__init__.py
@@ -1,3 +1,3 @@
-from .core import contents, where
+from .core import where
 
-__version__ = "2021.10.08"
+__version__ = "2019.11.28"
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/__main__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/__main__.py
old mode 100644
new mode 100755
index 8945b5da..5f1da0dd
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/__main__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/__main__.py
@@ -1,12 +1,2 @@
-import argparse
-
-from certifi import contents, where
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-c", "--contents", action="store_true")
-args = parser.parse_args()
-
-if args.contents:
- print(contents())
-else:
- print(where())
+from certifi import where
+print(where())
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/cacert.pem b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/cacert.pem
index 6d0ccc0d..a4758ef3 100644
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/cacert.pem
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/cacert.pem
@@ -58,6 +58,38 @@ AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
-----END CERTIFICATE-----
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Label: "Verisign Class 3 Public Primary Certification Authority - G3"
+# Serial: 206684696279472310254277870180966723415
+# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09
+# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6
+# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44
+-----BEGIN CERTIFICATE-----
+MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
+cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
+LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
+aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
+VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
+aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
+bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
+IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b
+N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t
+KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu
+kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm
+CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ
+Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu
+imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te
+2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe
+DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
+/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p
+F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt
+TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
+-----END CERTIFICATE-----
+
# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Label: "Entrust.net Premium 2048 Secure Server CA"
@@ -120,6 +152,39 @@ ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
-----END CERTIFICATE-----
+# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
+# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
+# Label: "AddTrust External Root"
+# Serial: 1
+# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f
+# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68
+# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2
+-----BEGIN CERTIFICATE-----
+MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
+IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
+MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
+FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
+bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
+dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
+H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
+uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
+mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
+a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
+E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
+WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
+VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
+Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
+cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
+IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
+AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
+YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
+6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
+Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
+c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
+mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
+-----END CERTIFICATE-----
+
# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
# Label: "Entrust Root Certification Authority"
@@ -155,6 +220,112 @@ eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
0vdXcDazv/wor3ElhVsT/h5/WrQ8
-----END CERTIFICATE-----
+# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc.
+# Subject: CN=GeoTrust Global CA O=GeoTrust Inc.
+# Label: "GeoTrust Global CA"
+# Serial: 144470
+# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5
+# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12
+# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a
+-----BEGIN CERTIFICATE-----
+MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i
+YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg
+R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9
+9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq
+fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv
+iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU
+1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+
+bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW
+MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA
+ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l
+uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn
+Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS
+tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF
+PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un
+hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV
+5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc.
+# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc.
+# Label: "GeoTrust Universal CA"
+# Serial: 1
+# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48
+# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79
+# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12
+-----BEGIN CERTIFICATE-----
+MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0
+IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV
+VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8
+cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT
+QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh
+F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v
+c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w
+mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd
+VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX
+teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ
+f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe
+Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+
+nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB
+/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY
+MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG
+9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc
+aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX
+IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn
+ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z
+uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN
+Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja
+QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW
+koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9
+ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt
+DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm
+bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
+# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
+# Label: "GeoTrust Universal CA 2"
+# Serial: 1
+# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7
+# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79
+# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b
+-----BEGIN CERTIFICATE-----
+MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD
+VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1
+c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81
+WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG
+FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq
+XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL
+se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb
+KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd
+IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73
+y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt
+hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc
+QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4
+Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV
+HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ
+KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z
+dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ
+L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr
+Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo
+ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY
+T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz
+GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m
+1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV
+OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH
+6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX
+QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
+-----END CERTIFICATE-----
+
# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
# Subject: CN=AAA Certificate Services O=Comodo CA Limited
# Label: "Comodo AAA Services root"
@@ -188,6 +359,48 @@ l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
-----END CERTIFICATE-----
+# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
+# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
+# Label: "QuoVadis Root CA"
+# Serial: 985026699
+# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24
+# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9
+# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73
+-----BEGIN CERTIFICATE-----
+MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz
+MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw
+IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR
+dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp
+li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D
+rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ
+WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug
+F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU
+xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC
+Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv
+dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw
+ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl
+IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh
+c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy
+ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh
+Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI
+KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T
+KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq
+y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p
+dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD
+VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL
+MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk
+fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8
+7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R
+cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y
+mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW
+xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK
+SnQ2+Q==
+-----END CERTIFICATE-----
+
# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
# Label: "QuoVadis Root CA 2"
@@ -303,6 +516,33 @@ JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot
RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw==
-----END CERTIFICATE-----
+# Issuer: CN=Sonera Class2 CA O=Sonera
+# Subject: CN=Sonera Class2 CA O=Sonera
+# Label: "Sonera Class 2 Root CA"
+# Serial: 29
+# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb
+# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27
+# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP
+MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx
+MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV
+BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o
+Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt
+5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s
+3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej
+vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu
+8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw
+DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG
+MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil
+zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/
+3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD
+FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6
+Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2
+ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M
+-----END CERTIFICATE-----
+
# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
# Label: "XRamp Global CA Root"
@@ -400,6 +640,46 @@ VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
-----END CERTIFICATE-----
+# Issuer: O=Government Root Certification Authority
+# Subject: O=Government Root Certification Authority
+# Label: "Taiwan GRCA"
+# Serial: 42023070807708724159991140556527066870
+# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e
+# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9
+# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3
+-----BEGIN CERTIFICATE-----
+MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/
+MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow
+PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
+AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR
+IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q
+gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy
+yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts
+F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2
+jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx
+ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC
+VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK
+YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH
+EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN
+Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud
+DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE
+MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK
+UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ
+TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf
+qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK
+ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE
+JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7
+hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1
+EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm
+nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX
+udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz
+ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe
+LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl
+pYYsfPQS
+-----END CERTIFICATE-----
+
# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Assured ID Root CA"
@@ -601,6 +881,104 @@ hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
-----END CERTIFICATE-----
+# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Label: "GeoTrust Primary Certification Authority"
+# Serial: 32798226551256963324313806436981982369
+# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf
+# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96
+# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c
+-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY
+MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo
+R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx
+MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
+Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9
+AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA
+ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0
+7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W
+kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI
+mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ
+KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1
+6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl
+4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K
+oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj
+UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU
+AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA"
+# Serial: 69529181992039203566298953787712940909
+# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12
+# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81
+# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
+qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
+BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
+NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
+LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
+A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
+IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
+W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
+3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
+6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
+Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
+NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
+r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
+DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
+YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
+xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
+/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
+LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
+jVaMaA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G5"
+# Serial: 33037644167568058970164719475676101450
+# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c
+# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5
+# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df
+-----BEGIN CERTIFICATE-----
+MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB
+yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW
+ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1
+nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex
+t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz
+SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG
+BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+
+rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/
+NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E
+BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH
+BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
+aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv
+MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE
+p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y
+5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK
+WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ
+4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N
+hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
+-----END CERTIFICATE-----
+
# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
# Subject: CN=SecureTrust CA O=SecureTrust Corporation
# Label: "SecureTrust CA"
@@ -749,6 +1127,38 @@ fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
-----END CERTIFICATE-----
+# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GA CA"
+# Serial: 86718877871133159090080555911823548314
+# MD5 Fingerprint: bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93
+# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9
+# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5
+-----BEGIN CERTIFICATE-----
+MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB
+ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly
+aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl
+ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w
+NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G
+A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD
+VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX
+SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR
+VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2
+w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF
+mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg
+4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9
+4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw
+EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx
+SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2
+ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8
+vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa
+hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi
+Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ
+/L7fCg0=
+-----END CERTIFICATE-----
+
# Issuer: CN=Certigna O=Dhimyotis
# Subject: CN=Certigna O=Dhimyotis
# Label: "Certigna"
@@ -878,6 +1288,185 @@ i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
9u6wWk5JRFRYX0KD
-----END CERTIFICATE-----
+# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G3"
+# Serial: 28809105769928564313984085209975885599
+# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05
+# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd
+# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4
+-----BEGIN CERTIFICATE-----
+MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
+mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
+MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
+eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
+cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
+BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
+BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
+hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
+5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
+JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
+DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
+huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
+HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
+AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
+zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
+kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
+AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
+SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
+spki4cErx5z481+oghLrGREt
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G2"
+# Serial: 71758320672825410020661621085256472406
+# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f
+# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12
+# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
+IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
+MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
+d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
+YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
+dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
+BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
+papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
+DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
+KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
+XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G3"
+# Serial: 127614157056681299805556476275995414779
+# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31
+# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2
+# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
+rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
+BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
+Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
+LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
+MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
+ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
+gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
+YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
+b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
+9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
+zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
+OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
+2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
+oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
+t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
+KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
+m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
+MdRAGmI0Nj81Aa6sY6A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G2"
+# Serial: 80682863203381065782177908751794619243
+# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a
+# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0
+# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66
+-----BEGIN CERTIFICATE-----
+MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
+MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
+KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
+MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
+BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
+NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
+BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
+MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
+So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
+tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
+CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
+qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
+rD6ogRLQy7rQkgu2npaqBA+K
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Universal Root Certification Authority"
+# Serial: 85209574734084581917763752644031726877
+# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19
+# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54
+# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c
+-----BEGIN CERTIFICATE-----
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
+vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
+ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
+MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
+IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
+IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
+bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
+9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
+H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
+/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
+rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
+WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
+exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
+sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
+seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
+4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
+lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
+7M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G4"
+# Serial: 63143484348153506665311985501458640051
+# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41
+# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a
+# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
+U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
+SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
+biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
+GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
+fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
+aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
+aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
+kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
+4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
+FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny"
@@ -910,6 +1499,47 @@ uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
-----END CERTIFICATE-----
+# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA - G2"
+# Serial: 10000012
+# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a
+# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16
+# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f
+-----BEGIN CERTIFICATE-----
+MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX
+DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291
+qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp
+uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU
+Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE
+pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp
+5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M
+UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN
+GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy
+5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv
+6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK
+eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6
+B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/
+BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov
+L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG
+SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS
+CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen
+5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897
+IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK
+gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL
++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL
+vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm
+bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk
+N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC
+Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z
+ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ==
+-----END CERTIFICATE-----
+
# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post
# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post
# Label: "Hongkong Post Root CA 1"
@@ -1113,6 +1743,105 @@ naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
-----END CERTIFICATE-----
+# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
+# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
+# Label: "Chambers of Commerce Root - 2008"
+# Serial: 11806822484801597146
+# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7
+# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c
+# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0
+-----BEGIN CERTIFICATE-----
+MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD
+VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
+IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
+MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz
+IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz
+MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj
+dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw
+EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp
+MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9
+28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq
+VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q
+DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR
+5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL
+ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a
+Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl
+UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s
++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5
+Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj
+ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx
+hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV
+HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1
++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN
+YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t
+L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy
+ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt
+IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV
+HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w
+DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW
+PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF
+5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1
+glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH
+FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2
+pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD
+xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG
+tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq
+jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De
+fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg
+OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ
+d0jQ
+-----END CERTIFICATE-----
+
+# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
+# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
+# Label: "Global Chambersign Root - 2008"
+# Serial: 14541511773111788494
+# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3
+# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c
+# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca
+-----BEGIN CERTIFICATE-----
+MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD
+VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
+IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
+MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD
+aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx
+MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy
+cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG
+A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl
+BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI
+hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed
+KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7
+G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2
+zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4
+ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG
+HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2
+Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V
+yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e
+beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r
+6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh
+wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog
+zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW
+BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr
+ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp
+ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk
+cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt
+YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC
+CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow
+KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI
+hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ
+UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz
+X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x
+fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz
+a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd
+Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd
+SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O
+AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso
+M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge
+v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z
+09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B
+-----END CERTIFICATE-----
+
# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
# Label: "Go Daddy Root Certificate Authority - G2"
@@ -1411,45 +2140,6 @@ t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
-----END CERTIFICATE-----
-# Issuer: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes
-# Subject: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes
-# Label: "EC-ACC"
-# Serial: -23701579247955709139626555126524820479
-# MD5 Fingerprint: eb:f5:9d:29:0d:61:f9:42:1f:7c:c2:ba:6d:e3:15:09
-# SHA1 Fingerprint: 28:90:3a:63:5b:52:80:fa:e6:77:4c:0b:6d:a7:d6:ba:a6:4a:f2:e8
-# SHA256 Fingerprint: 88:49:7f:01:60:2f:31:54:24:6a:e2:8c:4d:5a:ef:10:f1:d8:7e:bb:76:62:6f:4a:e0:b7:f9:5b:a7:96:87:99
------BEGIN CERTIFICATE-----
-MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB
-8zELMAkGA1UEBhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2Vy
-dGlmaWNhY2lvIChOSUYgUS0wODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1
-YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYDVQQLEyxWZWdldSBodHRwczovL3d3
-dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UECxMsSmVyYXJxdWlh
-IEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMTBkVD
-LUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQG
-EwJFUzE7MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8g
-KE5JRiBRLTA4MDExNzYtSSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBD
-ZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZlZ2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQu
-bmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJhcnF1aWEgRW50aXRhdHMg
-ZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUNDMIIBIjAN
-BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R
-85iKw5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm
-4CgPukLjbo73FCeTae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaV
-HMf5NLWUhdWZXqBIoH7nF2W4onW4HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNd
-QlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0aE9jD2z3Il3rucO2n5nzbcc8t
-lGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw0JDnJwIDAQAB
-o4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E
-BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4
-opvpXY0wfwYDVR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBo
-dHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidW
-ZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAwDQYJKoZIhvcN
-AQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJlF7W2u++AVtd0x7Y
-/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNaAl6k
-SBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhy
-Rp/7SNVel+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOS
-Agu+TGbrIP65y7WZf+a2E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xl
-nJ2lYJU6Un/10asIbvPuW/mIPX64b24D5EI=
------END CERTIFICATE-----
-
# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
# Label: "Hellenic Academic and Research Institutions RootCA 2011"
@@ -1524,6 +2214,35 @@ LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
-----END CERTIFICATE-----
+# Issuer: O=Trustis Limited OU=Trustis FPS Root CA
+# Subject: O=Trustis Limited OU=Trustis FPS Root CA
+# Label: "Trustis FPS Root CA"
+# Serial: 36053640375399034304724988975563710553
+# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d
+# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04
+# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d
+-----BEGIN CERTIFICATE-----
+MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF
+MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL
+ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx
+MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc
+MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+
+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH
+iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj
+vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA
+0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB
+OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/
+BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E
+FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01
+GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW
+zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4
+1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE
+f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F
+jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN
+ZetX2fNXlrtIzYE=
+-----END CERTIFICATE-----
+
# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
# Label: "Buypass Class 2 Root CA"
@@ -1633,6 +2352,38 @@ e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
TpPDpFQUWw==
-----END CERTIFICATE-----
+# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
+# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
+# Label: "EE Certification Centre Root CA"
+# Serial: 112324828676200291871926431888494945866
+# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f
+# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7
+# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76
+-----BEGIN CERTIFICATE-----
+MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1
+MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1
+czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG
+CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy
+MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl
+ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS
+b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy
+euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO
+bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw
+WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d
+MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE
+1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/
+zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB
+BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF
+BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV
+v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG
+E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u
+uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW
+iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v
+GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0=
+-----END CERTIFICATE-----
+
# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
# Label: "D-TRUST Root Class 3 CA 2 2009"
@@ -2385,6 +3136,46 @@ KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
xwy8p2Fp8fc74SrL+SvzZpA3
-----END CERTIFICATE-----
+# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA - G3"
+# Serial: 10003001
+# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37
+# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc
+# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28
+-----BEGIN CERTIFICATE-----
+MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX
+DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP
+cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW
+IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX
+xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy
+KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR
+9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az
+5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8
+6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7
+Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP
+bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt
+BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt
+XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF
+MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd
+INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD
+U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp
+LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8
+Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp
+gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh
+/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw
+0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A
+fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq
+4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR
+1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/
+QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM
+94B7IWcnMFk=
+-----END CERTIFICATE-----
+
# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
# Label: "Staat der Nederlanden EV Root CA"
@@ -2958,6 +3749,47 @@ CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
1KyLa2tJElMzrdfkviT8tQp21KW8EA==
-----END CERTIFICATE-----
+# Issuer: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
+# Subject: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
+# Label: "LuxTrust Global Root 2"
+# Serial: 59914338225734147123941058376788110305822489521
+# MD5 Fingerprint: b2:e1:09:00:61:af:f7:f1:91:6f:c4:ad:8d:5e:3b:7c
+# SHA1 Fingerprint: 1e:0e:56:19:0a:d1:8b:25:98:b2:04:44:ff:66:8a:04:17:99:5f:3f
+# SHA256 Fingerprint: 54:45:5f:71:29:c2:0b:14:47:c4:18:f9:97:16:8f:24:c5:8f:c5:02:3b:f5:da:5b:e2:eb:6e:1d:d8:90:2e:d5
+-----BEGIN CERTIFICATE-----
+MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL
+BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV
+BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw
+MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B
+LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F
+ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem
+hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1
+EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn
+Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4
+zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ
+96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m
+j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g
+DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+
+8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j
+X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH
+hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB
+KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0
+Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT
++Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL
+BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9
+BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO
+jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9
+loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c
+qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+
+2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/
+JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre
+zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf
+LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+
+x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6
+oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr
+-----END CERTIFICATE-----
+
# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1"
@@ -3768,595 +4600,3 @@ IQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk
5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuY
n/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw==
-----END CERTIFICATE-----
-
-# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
-# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
-# Label: "Microsoft ECC Root Certificate Authority 2017"
-# Serial: 136839042543790627607696632466672567020
-# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67
-# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5
-# SHA256 Fingerprint: 35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02
------BEGIN CERTIFICATE-----
-MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw
-CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD
-VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw
-MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV
-UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy
-b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq
-hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR
-ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb
-hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E
-BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3
-FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV
-L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB
-iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M=
------END CERTIFICATE-----
-
-# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
-# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
-# Label: "Microsoft RSA Root Certificate Authority 2017"
-# Serial: 40975477897264996090493496164228220339
-# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47
-# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74
-# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0
------BEGIN CERTIFICATE-----
-MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl
-MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw
-NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5
-IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG
-EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N
-aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi
-MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ
-Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0
-ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1
-HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm
-gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ
-jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc
-aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG
-YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6
-W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K
-UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH
-+FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q
-W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/
-BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC
-NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC
-LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC
-gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6
-tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh
-SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2
-TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3
-pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR
-xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp
-GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9
-dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN
-AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB
-RA+GsCyRxj3qrg+E
------END CERTIFICATE-----
-
-# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
-# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
-# Label: "e-Szigno Root CA 2017"
-# Serial: 411379200276854331539784714
-# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98
-# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1
-# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99
------BEGIN CERTIFICATE-----
-MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV
-BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk
-LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv
-b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ
-BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg
-THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v
-IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv
-xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H
-Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
-A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB
-eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo
-jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ
-+efcMQ==
------END CERTIFICATE-----
-
-# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2
-# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2
-# Label: "certSIGN Root CA G2"
-# Serial: 313609486401300475190
-# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7
-# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32
-# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05
------BEGIN CERTIFICATE-----
-MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV
-BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g
-Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ
-BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ
-R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF
-dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw
-vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ
-uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp
-n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs
-cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW
-xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P
-rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF
-DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx
-DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy
-LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C
-eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB
-/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ
-d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq
-kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC
-b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl
-qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0
-OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c
-NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk
-ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO
-pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj
-03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk
-PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE
-1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX
-QRBdJ3NghVdJIgc=
------END CERTIFICATE-----
-
-# Issuer: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc.
-# Subject: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc.
-# Label: "Trustwave Global Certification Authority"
-# Serial: 1846098327275375458322922162
-# MD5 Fingerprint: f8:1c:18:2d:2f:ba:5f:6d:a1:6c:bc:c7:ab:91:c7:0e
-# SHA1 Fingerprint: 2f:8f:36:4f:e1:58:97:44:21:59:87:a5:2a:9a:d0:69:95:26:7f:b5
-# SHA256 Fingerprint: 97:55:20:15:f5:dd:fc:3c:87:88:c0:06:94:45:55:40:88:94:45:00:84:f1:00:86:70:86:bc:1a:2b:b5:8d:c8
------BEGIN CERTIFICATE-----
-MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQsw
-CQYDVQQGEwJVUzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28x
-ITAfBgNVBAoMGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1
-c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMx
-OTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJVUzERMA8GA1UECAwI
-SWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2ZSBI
-b2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZp
-Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
-ALldUShLPDeS0YLOvR29zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0Xzn
-swuvCAAJWX/NKSqIk4cXGIDtiLK0thAfLdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu
-7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4BqstTnoApTAbqOl5F2brz8
-1Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9oWN0EACyW
-80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotP
-JqX+OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1l
-RtzuzWniTY+HKE40Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfw
-hI0Vcnyh78zyiGG69Gm7DIwLdVcEuE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10
-coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm+9jaJXLE9gCxInm943xZYkqc
-BW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqjifLJS3tBEW1n
-twiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud
-EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1Ud
-DwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W
-0OhUKDtkLSGm+J1WE2pIPU/HPinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfe
-uyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0HZJDmHvUqoai7PF35owgLEQzxPy0Q
-lG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla4gt5kNdXElE1GYhB
-aCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5RvbbE
-sLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPT
-MaCm/zjdzyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qe
-qu5AvzSxnI9O4fKSTx+O856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxh
-VicGaeVyQYHTtgGJoC86cnn+OjC/QezHYj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8
-h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu3R3y4G5OBVixwJAWKqQ9
-EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP29FpHOTK
-yeC2nOnOcXHebD8WpHk=
------END CERTIFICATE-----
-
-# Issuer: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc.
-# Subject: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc.
-# Label: "Trustwave Global ECC P256 Certification Authority"
-# Serial: 4151900041497450638097112925
-# MD5 Fingerprint: 5b:44:e3:8d:5d:36:86:26:e8:0d:05:d2:59:a7:83:54
-# SHA1 Fingerprint: b4:90:82:dd:45:0c:be:8b:5b:b1:66:d3:e2:a4:08:26:cd:ed:42:cf
-# SHA256 Fingerprint: 94:5b:bc:82:5e:a5:54:f4:89:d1:fd:51:a7:3d:df:2e:a6:24:ac:70:19:a0:52:05:22:5c:22:a7:8c:cf:a8:b4
------BEGIN CERTIFICATE-----
-MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYD
-VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf
-BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3
-YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x
-NzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYDVQQGEwJVUzERMA8G
-A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0
-d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF
-Q0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqG
-SM49AwEHA0IABH77bOYj43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoN
-FWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqmP62jQzBBMA8GA1UdEwEB/wQFMAMBAf8w
-DwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt0UrrdaVKEJmzsaGLSvcw
-CgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjzRM4q3wgh
-DDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7
------END CERTIFICATE-----
-
-# Issuer: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc.
-# Subject: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc.
-# Label: "Trustwave Global ECC P384 Certification Authority"
-# Serial: 2704997926503831671788816187
-# MD5 Fingerprint: ea:cf:60:c4:3b:b9:15:29:40:a1:97:ed:78:27:93:d6
-# SHA1 Fingerprint: e7:f3:a3:c8:cf:6f:c3:04:2e:6d:0e:67:32:c5:9e:68:95:0d:5e:d2
-# SHA256 Fingerprint: 55:90:38:59:c8:c0:c3:eb:b8:75:9e:ce:4e:25:57:22:5f:f5:75:8b:bd:38:eb:d4:82:76:60:1e:1b:d5:80:97
------BEGIN CERTIFICATE-----
-MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYD
-VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf
-BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3
-YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x
-NzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYDVQQGEwJVUzERMA8G
-A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0
-d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF
-Q0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuB
-BAAiA2IABGvaDXU1CDFHBa5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJ
-j9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr/TklZvFe/oyujUF5nQlgziip04pt89ZF
-1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNVHQ8BAf8EBQMDBwYAMB0G
-A1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNnADBkAjA3
-AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsC
-MGclCrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVu
-Sw==
------END CERTIFICATE-----
-
-# Issuer: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp.
-# Subject: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp.
-# Label: "NAVER Global Root Certification Authority"
-# Serial: 9013692873798656336226253319739695165984492813
-# MD5 Fingerprint: c8:7e:41:f6:25:3b:f5:09:b3:17:e8:46:3d:bf:d0:9b
-# SHA1 Fingerprint: 8f:6b:f2:a9:27:4a:da:14:a0:c4:f4:8e:61:27:f9:c0:1e:78:5d:d1
-# SHA256 Fingerprint: 88:f4:38:dc:f8:ff:d1:fa:8f:42:91:15:ff:e5:f8:2a:e1:e0:6e:0c:70:c3:75:fa:ad:71:7b:34:a4:9e:72:65
------BEGIN CERTIFICATE-----
-MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEM
-BQAwaTELMAkGA1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRG
-T1JNIENvcnAuMTIwMAYDVQQDDClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0
-aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4NDJaFw0zNzA4MTgyMzU5NTlaMGkx
-CzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVTUyBQTEFURk9STSBD
-b3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlvbiBB
-dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVA
-iQqrDZBbUGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH
-38dq6SZeWYp34+hInDEW+j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lE
-HoSTGEq0n+USZGnQJoViAbbJAh2+g1G7XNr4rRVqmfeSVPc0W+m/6imBEtRTkZaz
-kVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2aacp+yPOiNgSnABIqKYP
-szuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4Yb8Obtoq
-vC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHf
-nZ3zVHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaG
-YQ5fG8Ir4ozVu53BA0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo
-0es+nPxdGoMuK8u180SdOqcXYZaicdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3a
-CJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejyYhbLgGvtPe31HzClrkvJE+2K
-AQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNVHQ4EFgQU0p+I
-36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB
-Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoN
-qo0hV4/GPnrK21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatj
-cu3cvuzHV+YwIHHW1xDBE1UBjCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm
-+LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bxhYTeodoS76TiEJd6eN4MUZeoIUCL
-hr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTgE34h5prCy8VCZLQe
-lHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTHD8z7
-p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8
-piKCk5XQA76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLR
-LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX
-5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oGI/hGoiLtk/bdmuYqh7GYVPEi92tF4+KO
-dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul
-9XXeifdy
------END CERTIFICATE-----
-
-# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres
-# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres
-# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS"
-# Serial: 131542671362353147877283741781055151509
-# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb
-# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a
-# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb
------BEGIN CERTIFICATE-----
-MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw
-CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw
-FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S
-Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5
-MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL
-DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS
-QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB
-BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH
-sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK
-Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
-VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu
-SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC
-MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy
-v+c=
------END CERTIFICATE-----
-
-# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa
-# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa
-# Label: "GlobalSign Root R46"
-# Serial: 1552617688466950547958867513931858518042577
-# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef
-# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90
-# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9
------BEGIN CERTIFICATE-----
-MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA
-MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD
-VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy
-MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt
-c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB
-AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ
-OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG
-vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud
-316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo
-0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE
-y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF
-zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE
-+cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN
-I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs
-x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa
-ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC
-4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
-HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4
-7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg
-JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti
-2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk
-pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF
-FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt
-rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk
-ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5
-u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP
-4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6
-N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3
-vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6
------END CERTIFICATE-----
-
-# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa
-# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa
-# Label: "GlobalSign Root E46"
-# Serial: 1552617690338932563915843282459653771421763
-# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f
-# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84
-# SHA256 Fingerprint: cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58
------BEGIN CERTIFICATE-----
-MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx
-CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD
-ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw
-MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex
-HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA
-IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq
-R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd
-yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
-DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ
-7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8
-+RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A=
------END CERTIFICATE-----
-
-# Issuer: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH
-# Subject: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH
-# Label: "GLOBALTRUST 2020"
-# Serial: 109160994242082918454945253
-# MD5 Fingerprint: 8a:c7:6f:cb:6d:e3:cc:a2:f1:7c:83:fa:0e:78:d7:e8
-# SHA1 Fingerprint: d0:67:c1:13:51:01:0c:aa:d0:c7:6a:65:37:31:16:26:4f:53:71:a2
-# SHA256 Fingerprint: 9a:29:6a:51:82:d1:d4:51:a2:e3:7f:43:9b:74:da:af:a2:67:52:33:29:f9:0f:9a:0d:20:07:c3:34:e2:3c:9a
------BEGIN CERTIFICATE-----
-MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkG
-A1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkw
-FwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYx
-MDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9u
-aXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMIICIjANBgkq
-hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWiD59b
-RatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9Z
-YybNpyrOVPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3
-QWPKzv9pj2gOlTblzLmMCcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPw
-yJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCmfecqQjuCgGOlYx8ZzHyyZqjC0203b+J+
-BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKAA1GqtH6qRNdDYfOiaxaJ
-SaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9ORJitHHmkH
-r96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj0
-4KlGDfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9Me
-dKZssCz3AwyIDMvUclOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIw
-q7ejMZdnrY8XD2zHc+0klGvIg5rQmjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2
-nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
-AQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1UdIwQYMBaAFNwu
-H9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA
-VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJC
-XtzoRlgHNQIw4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd
-6IwPS3BD0IL/qMy/pJTAvoe9iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf
-+I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS8cE54+X1+NZK3TTN+2/BT+MAi1bi
-kvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2HcqtbepBEX4tdJP7
-wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxSvTOB
-TI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6C
-MUO+1918oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn
-4rnvyOL2NSl6dPrFf4IFYqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+I
-aFvowdlxfv1k7/9nR4hYJS8+hge9+6jlgqispdNpQ80xiEmEU5LAsTkbOYMBMMTy
-qfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg==
------END CERTIFICATE-----
-
-# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz
-# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz
-# Label: "ANF Secure Server Root CA"
-# Serial: 996390341000653745
-# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96
-# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74
-# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99
------BEGIN CERTIFICATE-----
-MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV
-BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk
-YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV
-BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN
-MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF
-UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD
-VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v
-dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj
-cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q
-yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH
-2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX
-H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL
-zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR
-p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz
-W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/
-SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn
-LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3
-n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B
-u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj
-o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO
-BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
-AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L
-9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej
-rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK
-pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0
-vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq
-OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ
-/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9
-2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI
-+PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2
-MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo
-tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw=
------END CERTIFICATE-----
-
-# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
-# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
-# Label: "Certum EC-384 CA"
-# Serial: 160250656287871593594747141429395092468
-# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1
-# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed
-# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6
------BEGIN CERTIFICATE-----
-MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw
-CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw
-JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT
-EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0
-WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT
-LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX
-BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE
-KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm
-Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj
-QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8
-EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J
-UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn
-nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k=
------END CERTIFICATE-----
-
-# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
-# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
-# Label: "Certum Trusted Root CA"
-# Serial: 40870380103424195783807378461123655149
-# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29
-# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5
-# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd
------BEGIN CERTIFICATE-----
-MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6
-MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu
-MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV
-BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw
-MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg
-U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo
-b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG
-SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ
-n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q
-p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq
-NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF
-8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3
-HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa
-mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi
-7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF
-ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P
-qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ
-v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6
-Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1
-vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD
-ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4
-WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo
-zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR
-5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ
-GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf
-5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq
-0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D
-P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM
-qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP
-0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf
-E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb
------END CERTIFICATE-----
-
-# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique
-# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique
-# Label: "TunTrust Root CA"
-# Serial: 108534058042236574382096126452369648152337120275
-# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4
-# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb
-# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41
------BEGIN CERTIFICATE-----
-MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL
-BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg
-Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv
-b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG
-EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u
-IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ
-KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ
-n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd
-2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF
-VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ
-GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF
-li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU
-r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2
-eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb
-MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg
-jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB
-7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW
-5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE
-ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0
-90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z
-xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu
-QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4
-FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH
-22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP
-xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn
-dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5
-Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b
-nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ
-CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH
-u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj
-d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o=
------END CERTIFICATE-----
-
-# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA
-# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA
-# Label: "HARICA TLS RSA Root CA 2021"
-# Serial: 76817823531813593706434026085292783742
-# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91
-# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d
-# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d
------BEGIN CERTIFICATE-----
-MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs
-MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
-c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg
-Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL
-MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl
-YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv
-b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l
-mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE
-4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv
-a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M
-pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw
-Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b
-LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY
-AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB
-AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq
-E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr
-W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ
-CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF
-MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE
-AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU
-X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3
-f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja
-H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP
-JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P
-zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt
-jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0
-/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT
-BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79
-aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW
-xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU
-63ZTGI0RmLo=
------END CERTIFICATE-----
-
-# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA
-# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA
-# Label: "HARICA TLS ECC Root CA 2021"
-# Serial: 137515985548005187474074462014555733966
-# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0
-# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48
-# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01
------BEGIN CERTIFICATE-----
-MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw
-CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh
-cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v
-dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG
-A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj
-aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg
-Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7
-KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y
-STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw
-AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD
-AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw
-SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN
-nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps
------END CERTIFICATE-----
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/core.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/core.py
old mode 100644
new mode 100755
index 5d2b8cd3..7271acf4
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/core.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/certifi/core.py
@@ -4,57 +4,12 @@
certifi.py
~~~~~~~~~~
-This module returns the installation location of cacert.pem or its contents.
+This module returns the installation location of cacert.pem.
"""
import os
-try:
- from importlib.resources import path as get_path, read_text
- _CACERT_CTX = None
- _CACERT_PATH = None
+def where():
+ f = os.path.dirname(__file__)
- def where():
- # This is slightly terrible, but we want to delay extracting the file
- # in cases where we're inside of a zipimport situation until someone
- # actually calls where(), but we don't want to re-extract the file
- # on every call of where(), so we'll do it once then store it in a
- # global variable.
- global _CACERT_CTX
- global _CACERT_PATH
- if _CACERT_PATH is None:
- # This is slightly janky, the importlib.resources API wants you to
- # manage the cleanup of this file, so it doesn't actually return a
- # path, it returns a context manager that will give you the path
- # when you enter it and will do any cleanup when you leave it. In
- # the common case of not needing a temporary file, it will just
- # return the file system location and the __exit__() is a no-op.
- #
- # We also have to hold onto the actual context manager, because
- # it will do the cleanup whenever it gets garbage collected, so
- # we will also store that at the global level as well.
- _CACERT_CTX = get_path("certifi", "cacert.pem")
- _CACERT_PATH = str(_CACERT_CTX.__enter__())
-
- return _CACERT_PATH
-
-
-except ImportError:
- # This fallback will work for Python versions prior to 3.7 that lack the
- # importlib.resources module but relies on the existing `where` function
- # so won't address issues with environments like PyOxidizer that don't set
- # __file__ on modules.
- def read_text(_module, _path, encoding="ascii"):
- with open(where(), "r", encoding=encoding) as data:
- return data.read()
-
- # If we don't have importlib.resources, then we will just do the old logic
- # of assuming we're on the filesystem and munge the path directly.
- def where():
- f = os.path.dirname(__file__)
-
- return os.path.join(f, "cacert.pem")
-
-
-def contents():
- return read_text("certifi", "cacert.pem", encoding="ascii")
+ return os.path.join(f, 'cacert.pem')
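Note: the replacement `where()` above drops the importlib.resources / zipimport handling and simply assumes certifi is installed as a plain directory on disk, which holds for this vendored copy. A minimal usage sketch (standard-library `ssl` only; nothing here beyond the public `certifi.where()` API):

    import ssl

    import certifi

    # Trust the vendored CA bundle instead of the system store;
    # where() now just returns a filesystem path next to core.py.
    context = ssl.create_default_context(cafile=certifi.where())

Also note that this revision removes `contents()` entirely, so any caller that needs the bundle's text must open and read the file at `where()` itself.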
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/__init__.py
new file mode 100755
index 00000000..0f9f820e
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/__init__.py
@@ -0,0 +1,39 @@
+######################## BEGIN LICENSE BLOCK ########################
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+
+from .compat import PY2, PY3
+from .universaldetector import UniversalDetector
+from .version import __version__, VERSION
+
+
+def detect(byte_str):
+ """
+ Detect the encoding of the given byte string.
+
+ :param byte_str: The byte sequence to examine.
+ :type byte_str: ``bytes`` or ``bytearray``
+ """
+ if not isinstance(byte_str, bytearray):
+ if not isinstance(byte_str, bytes):
+ raise TypeError('Expected object of type bytes or bytearray, got: '
+ '{0}'.format(type(byte_str)))
+ else:
+ byte_str = bytearray(byte_str)
+ detector = UniversalDetector()
+ detector.feed(byte_str)
+ return detector.close()
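The `detect()` helper added above is a convenience wrapper that runs a one-shot `UniversalDetector` over the whole input. A short usage sketch (the sample bytes are arbitrary, and confidence values depend heavily on input length):

    import chardet

    raw = 'こんにちは、世界'.encode('shift_jis')
    result = chardet.detect(raw)
    # result is a dict such as
    # {'encoding': 'SHIFT_JIS', 'confidence': 0.99, 'language': 'Japanese'}
    print(result['encoding'], result['confidence'])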
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/big5freq.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/big5freq.py
new file mode 100755
index 00000000..38f32517
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/big5freq.py
@@ -0,0 +1,386 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# Big5 frequency table
+# by Taiwan's Mandarin Promotion Council
+#
+#
+# 128 --> 0.42261
+# 256 --> 0.57851
+# 512 --> 0.74851
+# 1024 --> 0.89384
+# 2048 --> 0.97583
+#
+# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
+# Random Distribution Ratio (RDR) = 512/(5401-512) = 0.105
+#
+# The typical distribution ratio is about 25% of the ideal one, still much higher than the RDR
+
+BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
+
+# Char to FreqOrder table
+BIG5_TABLE_SIZE = 5376
+
+BIG5_CHAR_TO_FREQ_ORDER = (
+ 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
+3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
+1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
+ 63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
+3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
+4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
+5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
+ 630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
+ 179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
+ 995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
+2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
+1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
+3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
+ 706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
+1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
+3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
+2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
+ 437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
+3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
+1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
+5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
+ 266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
+5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
+1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
+ 32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
+ 188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
+3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
+3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
+ 324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
+2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
+2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
+ 314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
+ 287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
+3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
+1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
+1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
+1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
+2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
+ 265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
+4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
+1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
+5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
+2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
+ 383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
+ 98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
+ 523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
+ 710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
+5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
+ 379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
+1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
+ 585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
+ 690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
+5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
+1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
+ 544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
+3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
+4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
+3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
+ 279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
+ 610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
+1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
+4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
+3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
+3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
+2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
+5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
+3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
+5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
+1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
+2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
+1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
+ 78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
+1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
+4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
+3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
+ 534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
+ 165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
+ 626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
+2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
+5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
+1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
+2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
+1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
+1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
+5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
+5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
+5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
+3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
+4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
+4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
+2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
+5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
+3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
+ 598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
+5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
+5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
+1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
+2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
+3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
+4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
+5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
+3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
+4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
+1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
+1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
+4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
+1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
+ 240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
+1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
+1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
+3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
+ 619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
+5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
+2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
+1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
+1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
+5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
+ 829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
+4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
+ 375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
+2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
+ 444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
+1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
+1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
+ 730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
+4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
+4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
+1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
+3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
+5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
+5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
+1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
+2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
+1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
+3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
+2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
+3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
+2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
+4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
+4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
+3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
+ 97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
+3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
+ 424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
+3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
+4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
+3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
+1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
+5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
+ 199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
+5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
+1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
+ 391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
+4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
+4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
+ 397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
+2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
+2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
+3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
+1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
+4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
+2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
+1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
+1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
+2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
+3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
+1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
+5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
+1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
+4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
+1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
+ 135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
+1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
+4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
+4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
+2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
+1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
+4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
+ 660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
+5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
+2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
+3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
+4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
+ 790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
+5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
+5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
+1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
+4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
+4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
+2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
+3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
+3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
+2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
+1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
+4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
+3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
+3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
+2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
+4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
+5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
+3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
+2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
+3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
+1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
+2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
+3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
+4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
+2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
+2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
+5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
+1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
+2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
+1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
+3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
+4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
+2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
+3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
+3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
+2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
+4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
+2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
+3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
+4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
+5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
+3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
+ 194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
+1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
+4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
+1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
+4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
+5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
+ 510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
+5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
+5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
+2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
+3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
+2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
+2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
+ 681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
+1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
+4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
+3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
+3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
+ 838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
+2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
+ 625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
+2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
+4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
+1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
+4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
+1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
+3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
+ 574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
+3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
+5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
+5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
+3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
+3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
+1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
+2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
+5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
+1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
+1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
+3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
+ 919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
+1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
+4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
+5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
+2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
+3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
+ 516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
+1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
+2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
+2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
+5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
+5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
+5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
+2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
+2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
+1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
+4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
+3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
+3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
+4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
+4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
+2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
+2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
+5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
+4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
+5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
+4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
+ 502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
+ 121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
+1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
+3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
+4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
+1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
+5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
+2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
+2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
+3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
+5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
+1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
+3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
+5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
+1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
+5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
+2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
+3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
+2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
+3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
+3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
+3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
+4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
+ 803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
+2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
+4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
+3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
+5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
+1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
+5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
+ 425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
+1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
+ 479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
+4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
+1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
+4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
+1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
+ 433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
+3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
+4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
+5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
+ 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
+3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
+ 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
+2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376
+)
+
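The table above maps a Big5 character order (derived from the two raw bytes; see `Big5DistributionAnalysis.get_order` later in this diff) to a frequency rank, and ranks below 512 are what `CharDistributionAnalysis.feed` counts as frequent characters. A tiny illustrative helper (not part of chardet) showing the lookup:

    from chardet.big5freq import BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE

    def is_frequent(order):
        # order: index computed from a 2-byte Big5 sequence; ranks below
        # 512 are treated as high-frequency characters in the analysis.
        return 0 <= order < BIG5_TABLE_SIZE and BIG5_CHAR_TO_FREQ_ORDER[order] < 512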
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/big5prober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/big5prober.py
new file mode 100755
index 00000000..98f99701
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/big5prober.py
@@ -0,0 +1,47 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import Big5DistributionAnalysis
+from .mbcssm import BIG5_SM_MODEL
+
+
+class Big5Prober(MultiByteCharSetProber):
+ def __init__(self):
+ super(Big5Prober, self).__init__()
+ self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
+ self.distribution_analyzer = Big5DistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self):
+ return "Big5"
+
+ @property
+ def language(self):
+ return "Chinese"
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/chardistribution.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/chardistribution.py
new file mode 100755
index 00000000..c0395f4a
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/chardistribution.py
@@ -0,0 +1,233 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE,
+ EUCTW_TYPICAL_DISTRIBUTION_RATIO)
+from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE,
+ EUCKR_TYPICAL_DISTRIBUTION_RATIO)
+from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE,
+ GB2312_TYPICAL_DISTRIBUTION_RATIO)
+from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE,
+ BIG5_TYPICAL_DISTRIBUTION_RATIO)
+from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE,
+ JIS_TYPICAL_DISTRIBUTION_RATIO)
+
+
+class CharDistributionAnalysis(object):
+ ENOUGH_DATA_THRESHOLD = 1024
+ SURE_YES = 0.99
+ SURE_NO = 0.01
+ MINIMUM_DATA_THRESHOLD = 3
+
+ def __init__(self):
+ # Mapping table to get frequency order from char order (as returned
+ # by get_order())
+ self._char_to_freq_order = None
+ self._table_size = None # Size of above table
+ # This is a constant value which varies from language to language,
+ # used in calculating confidence. See
+ # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
+ # for further detail.
+ self.typical_distribution_ratio = None
+ self._done = None
+ self._total_chars = None
+ self._freq_chars = None
+ self.reset()
+
+ def reset(self):
+ """reset analyser, clear any state"""
+ # If this flag is set to True, detection is done and conclusion has
+ # been made
+ self._done = False
+ self._total_chars = 0 # Total characters encountered
+ # The number of characters whose frequency order is less than 512
+ self._freq_chars = 0
+
+ def feed(self, char, char_len):
+ """feed a character with known length"""
+ if char_len == 2:
+ # we only care about 2-byte characters in our distribution analysis
+ order = self.get_order(char)
+ else:
+ order = -1
+ if order >= 0:
+ self._total_chars += 1
+ # order is valid
+ if order < self._table_size:
+ if 512 > self._char_to_freq_order[order]:
+ self._freq_chars += 1
+
+ def get_confidence(self):
+ """return confidence based on existing data"""
+ # if we didn't receive any character in our consideration range,
+ # return negative answer
+ if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
+ return self.SURE_NO
+
+ if self._total_chars != self._freq_chars:
+ r = (self._freq_chars / ((self._total_chars - self._freq_chars)
+ * self.typical_distribution_ratio))
+ if r < self.SURE_YES:
+ return r
+
+ # normalize confidence (we don't want to be 100% sure)
+ return self.SURE_YES
+
+ def got_enough_data(self):
+ # It is not necessary to receive all of the data to draw a conclusion.
+ # For charset detection, a certain amount of data is enough.
+ return self._total_chars > self.ENOUGH_DATA_THRESHOLD
+
+ def get_order(self, byte_str):
+ # We do not handle characters based on the original encoding string,
+ # but convert this encoding string to a number, here called order.
+ # This allows multiple encodings of a language to share one frequency
+ # table.
+ return -1
+
+
+class EUCTWDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(EUCTWDistributionAnalysis, self).__init__()
+ self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
+ self._table_size = EUCTW_TABLE_SIZE
+ self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for euc-TW encoding, we are interested
+ # first byte range: 0xc4 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char = byte_str[0]
+ if first_char >= 0xC4:
+ return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
+ else:
+ return -1
+
+
+class EUCKRDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(EUCKRDistributionAnalysis, self).__init__()
+ self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
+ self._table_size = EUCKR_TABLE_SIZE
+ self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for euc-KR encoding, we are interested
+ # first byte range: 0xb0 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char = byte_str[0]
+ if first_char >= 0xB0:
+ return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
+ else:
+ return -1
+
+
+class GB2312DistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(GB2312DistributionAnalysis, self).__init__()
+ self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
+ self._table_size = GB2312_TABLE_SIZE
+ self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for GB2312 encoding, we are interested
+ # first byte range: 0xb0 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char, second_char = byte_str[0], byte_str[1]
+ if (first_char >= 0xB0) and (second_char >= 0xA1):
+ return 94 * (first_char - 0xB0) + second_char - 0xA1
+ else:
+ return -1
+
+
+class Big5DistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(Big5DistributionAnalysis, self).__init__()
+ self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
+ self._table_size = BIG5_TABLE_SIZE
+ self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for big5 encoding, we are interested
+ # first byte range: 0xa4 -- 0xfe
+ # second byte range: 0x40 -- 0x7e, 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char, second_char = byte_str[0], byte_str[1]
+ if first_char >= 0xA4:
+ if second_char >= 0xA1:
+ return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
+ else:
+ return 157 * (first_char - 0xA4) + second_char - 0x40
+ else:
+ return -1
+
+
+class SJISDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(SJISDistributionAnalysis, self).__init__()
+ self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
+ self._table_size = JIS_TABLE_SIZE
+ self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for sjis encoding, we are interested
+ # first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
+ # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char, second_char = byte_str[0], byte_str[1]
+ if (first_char >= 0x81) and (first_char <= 0x9F):
+ order = 188 * (first_char - 0x81)
+ elif (first_char >= 0xE0) and (first_char <= 0xEF):
+ order = 188 * (first_char - 0xE0 + 31)
+ else:
+ return -1
+ order = order + second_char - 0x40
+ if second_char > 0x7F:
+ order = -1
+ return order
+
+
+class EUCJPDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(EUCJPDistributionAnalysis, self).__init__()
+ self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
+ self._table_size = JIS_TABLE_SIZE
+ self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for euc-JP encoding, we are interested
+ # first byte range: 0xa0 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ char = byte_str[0]
+ if char >= 0xA0:
+ return 94 * (char - 0xA1) + byte_str[1] - 0xA1
+ else:
+ return -1
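All of the get_order implementations above share one idea: a valid two-byte sequence is mapped to a row-major index into the encoding's frequency table. A quick check of the Big5 arithmetic, as a standalone sketch that assumes the vendored chardet package is importable:

```python
from chardet.chardistribution import Big5DistributionAnalysis

dist = Big5DistributionAnalysis()
# 0xA4 0x40 is the first valid Big5 cell: 157 * 0 + (0x40 - 0x40) = 0
print(dist.get_order(b'\xa4\x40'))  # 0
# second bytes >= 0xA1 are offset past the 63 cells of the 0x40--0x7E range
print(dist.get_order(b'\xa4\xa1'))  # 63
# lead bytes below 0xA4 fall outside the frequency table
print(dist.get_order(b'\xa3\xa1'))  # -1
```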
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/charsetgroupprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/charsetgroupprober.py
new file mode 100755
index 00000000..8b3738ef
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/charsetgroupprober.py
@@ -0,0 +1,106 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import ProbingState
+from .charsetprober import CharSetProber
+
+
+class CharSetGroupProber(CharSetProber):
+ def __init__(self, lang_filter=None):
+ super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
+ self._active_num = 0
+ self.probers = []
+ self._best_guess_prober = None
+
+ def reset(self):
+ super(CharSetGroupProber, self).reset()
+ self._active_num = 0
+ for prober in self.probers:
+ if prober:
+ prober.reset()
+ prober.active = True
+ self._active_num += 1
+ self._best_guess_prober = None
+
+ @property
+ def charset_name(self):
+ if not self._best_guess_prober:
+ self.get_confidence()
+ if not self._best_guess_prober:
+ return None
+ return self._best_guess_prober.charset_name
+
+ @property
+ def language(self):
+ if not self._best_guess_prober:
+ self.get_confidence()
+ if not self._best_guess_prober:
+ return None
+ return self._best_guess_prober.language
+
+ def feed(self, byte_str):
+ for prober in self.probers:
+ if not prober:
+ continue
+ if not prober.active:
+ continue
+ state = prober.feed(byte_str)
+ if not state:
+ continue
+ if state == ProbingState.FOUND_IT:
+ self._best_guess_prober = prober
+ return self.state
+ elif state == ProbingState.NOT_ME:
+ prober.active = False
+ self._active_num -= 1
+ if self._active_num <= 0:
+ self._state = ProbingState.NOT_ME
+ return self.state
+ return self.state
+
+ def get_confidence(self):
+ state = self.state
+ if state == ProbingState.FOUND_IT:
+ return 0.99
+ elif state == ProbingState.NOT_ME:
+ return 0.01
+ best_conf = 0.0
+ self._best_guess_prober = None
+ for prober in self.probers:
+ if not prober:
+ continue
+ if not prober.active:
+ self.logger.debug('%s not active', prober.charset_name)
+ continue
+ conf = prober.get_confidence()
+ self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
+ if best_conf < conf:
+ best_conf = conf
+ self._best_guess_prober = prober
+ if not self._best_guess_prober:
+ return 0.0
+ return best_conf
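CharSetGroupProber carries no probers of its own; concrete subclasses fill in self.probers, and the group then fans each feed() out to every active child, retiring probers that answer NOT_ME and short-circuiting on FOUND_IT. A minimal usage sketch, assuming chardet's MBCSGroupProber subclass is importable:

```python
from chardet.mbcsgroupprober import MBCSGroupProber

group = MBCSGroupProber()
group.feed('こんにちは、世界。銀河鉄道の夜。'.encode('shift_jis'))
# charset_name/get_confidence pick whichever child prober scored highest;
# the exact confidence depends on how much text has been fed.
print(group.charset_name, group.get_confidence())
```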
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/charsetprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/charsetprober.py
new file mode 100755
index 00000000..eac4e598
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/charsetprober.py
@@ -0,0 +1,145 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+import logging
+import re
+
+from .enums import ProbingState
+
+
+class CharSetProber(object):
+
+ SHORTCUT_THRESHOLD = 0.95
+
+ def __init__(self, lang_filter=None):
+ self._state = None
+ self.lang_filter = lang_filter
+ self.logger = logging.getLogger(__name__)
+
+ def reset(self):
+ self._state = ProbingState.DETECTING
+
+ @property
+ def charset_name(self):
+ return None
+
+ def feed(self, buf):
+ pass
+
+ @property
+ def state(self):
+ return self._state
+
+ def get_confidence(self):
+ return 0.0
+
+ @staticmethod
+ def filter_high_byte_only(buf):
+ buf = re.sub(b'([\x00-\x7F])+', b' ', buf)
+ return buf
+
+ @staticmethod
+ def filter_international_words(buf):
+ """
+ We define three types of bytes:
+ alphabet: English letters [a-zA-Z]
+ international: international characters [\x80-\xFF]
+ marker: everything else [^a-zA-Z\x80-\xFF]
+
+ The input buffer can be thought of as a series of words delimited
+ by markers. This function retains only the words that contain at
+ least one international character. All contiguous sequences of markers
+ are replaced by a single ASCII space character.
+
+ This filter applies to all scripts which do not use English characters.
+ """
+ filtered = bytearray()
+
+ # This regex matches only words that have at least one
+ # international character. The word may include one marker character at
+ # the end.
+ words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?',
+ buf)
+
+ for word in words:
+ filtered.extend(word[:-1])
+
+ # If the last character in the word is a marker, replace it with a
+ # space as markers shouldn't affect our analysis (they are used
+ # similarly across all languages and may thus have similar
+ # frequencies).
+ last_char = word[-1:]
+ if not last_char.isalpha() and last_char < b'\x80':
+ last_char = b' '
+ filtered.extend(last_char)
+
+ return filtered
+
+ @staticmethod
+ def filter_with_english_letters(buf):
+ """
+ Returns a copy of ``buf`` that retains only the sequences of English
+ alphabet and high byte characters that are not between <> characters.
+ Also retains English alphabet and high byte characters immediately
+ before occurrences of >.
+
+ This filter can be applied to all scripts which contain both English
+ characters and extended ASCII characters, but is currently only used by
+ ``Latin1Prober``.
+ """
+ filtered = bytearray()
+ in_tag = False
+ prev = 0
+
+ for curr in range(len(buf)):
+ # Slice here to get bytes instead of an int with Python 3
+ buf_char = buf[curr:curr + 1]
+ # Check if we're coming out of or entering an HTML tag
+ if buf_char == b'>':
+ in_tag = False
+ elif buf_char == b'<':
+ in_tag = True
+
+ # If current character is not extended-ASCII and not alphabetic...
+ if buf_char < b'\x80' and not buf_char.isalpha():
+ # ...and we're not in a tag
+ if curr > prev and not in_tag:
+ # Keep everything after last non-extended-ASCII,
+ # non-alphabetic character
+ filtered.extend(buf[prev:curr])
+ # Output a space to delimit stretch we kept
+ filtered.extend(b' ')
+ prev = curr + 1
+
+ # If we're not in a tag...
+ if not in_tag:
+ # Keep everything after last non-extended-ASCII, non-alphabetic
+ # character
+ filtered.extend(buf[prev:])
+
+ return filtered
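To make filter_international_words concrete, here is a small sketch with arbitrary Latin-1 bytes: words without a high byte are dropped entirely, and the trailing marker of each kept word becomes a space:

```python
from chardet.charsetprober import CharSetProber

buf = b'caf\xe9 au lait, voil\xe0!'
# 'au' and 'lait,' contain no bytes >= 0x80, so they are filtered out;
# the markers after the kept words become single spaces.
print(CharSetProber.filter_international_words(buf))
# bytearray(b'caf\xe9 voil\xe0 ')
```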
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/cli/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/cli/__init__.py
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/cli/__init__.py
@@ -0,0 +1 @@
+
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/cli/chardetect.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/cli/chardetect.py
new file mode 100755
index 00000000..f0a4cc5d
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/cli/chardetect.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+"""
+Script which takes one or more file paths and reports on their detected
+encodings
+
+Example::
+
+ % chardetect somefile someotherfile
+ somefile: windows-1252 with confidence 0.5
+ someotherfile: ascii with confidence 1.0
+
+If no paths are provided, it takes its input from stdin.
+
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import argparse
+import sys
+
+from chardet import __version__
+from chardet.compat import PY2
+from chardet.universaldetector import UniversalDetector
+
+
+def description_of(lines, name='stdin'):
+ """
+ Return a string describing the probable encoding of a file or
+ list of strings.
+
+ :param lines: The lines to get the encoding of.
+ :type lines: Iterable of bytes
+ :param name: Name of file or collection of lines
+ :type name: str
+ """
+ u = UniversalDetector()
+ for line in lines:
+ line = bytearray(line)
+ u.feed(line)
+ # shortcut out of the loop to save reading further - particularly useful if we read a BOM.
+ if u.done:
+ break
+ u.close()
+ result = u.result
+ if PY2:
+ name = name.decode(sys.getfilesystemencoding(), 'ignore')
+ if result['encoding']:
+ return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
+ result['confidence'])
+ else:
+ return '{0}: no result'.format(name)
+
+
+def main(argv=None):
+ """
+ Handles command line arguments and gets things started.
+
+ :param argv: List of arguments, as if specified on the command-line.
+ If None, ``sys.argv[1:]`` is used instead.
+ :type argv: list of str
+ """
+ # Get command line arguments
+ parser = argparse.ArgumentParser(
+ description="Takes one or more file paths and reports their detected \
+ encodings")
+ parser.add_argument('input',
+ help='File whose encoding we would like to determine. \
+ (default: stdin)',
+ type=argparse.FileType('rb'), nargs='*',
+ default=[sys.stdin if PY2 else sys.stdin.buffer])
+ parser.add_argument('--version', action='version',
+ version='%(prog)s {0}'.format(__version__))
+ args = parser.parse_args(argv)
+
+ for f in args.input:
+ if f.isatty():
+ print("You are running chardetect interactively. Press " +
+ "CTRL-D twice at the start of a blank line to signal the " +
+ "end of your input. If you want help, run chardetect " +
+ "--help\n", file=sys.stderr)
+ print(description_of(f, f.name))
+
+
+if __name__ == '__main__':
+ main()
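description_of accepts any iterable of byte lines, not just real files, so it can be exercised without touching the filesystem. A minimal sketch; the sample starts with a UTF-8 BOM, which lets the detector shortcut to a certain answer:

```python
from io import BytesIO

from chardet.cli.chardetect import description_of

data = b'\xef\xbb\xbfhello, world\n'  # the BOM triggers an immediate result
print(description_of(BytesIO(data), name='sample'))
# sample: UTF-8-SIG with confidence 1.0
```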
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/codingstatemachine.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/codingstatemachine.py
new file mode 100755
index 00000000..68fba44f
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/codingstatemachine.py
@@ -0,0 +1,88 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+import logging
+
+from .enums import MachineState
+
+
+class CodingStateMachine(object):
+ """
+ A state machine to verify a byte sequence for a particular encoding. For
+ each byte the detector receives, it will feed that byte to every active
+ state machine available, one byte at a time. The state machine changes its
+ state based on its previous state and the byte it receives. There are 3
+ states in a state machine that are of interest to an auto-detector:
+
+ START state: This is the state to start with, or the state reached after
+ a legal byte sequence (i.e. a valid code point) for a character
+ has been identified.
+
+ ME state: This indicates that the state machine identified a byte sequence
+ that is specific to the charset it is designed for and that
+ there is no other possible encoding which can contain this byte
+ sequence. This will lead to an immediate positive answer for
+ the detector.
+
+ ERROR state: This indicates the state machine identified an illegal byte
+ sequence for that encoding. This will lead to an immediate
+ negative answer for this encoding. Detector will exclude this
+ encoding from consideration from here on.
+ """
+ def __init__(self, sm):
+ self._model = sm
+ self._curr_byte_pos = 0
+ self._curr_char_len = 0
+ self._curr_state = None
+ self.logger = logging.getLogger(__name__)
+ self.reset()
+
+ def reset(self):
+ self._curr_state = MachineState.START
+
+ def next_state(self, c):
+ # for each byte we get its class
+ # if it is first byte, we also get byte length
+ byte_class = self._model['class_table'][c]
+ if self._curr_state == MachineState.START:
+ self._curr_byte_pos = 0
+ self._curr_char_len = self._model['char_len_table'][byte_class]
+ # from byte's class and state_table, we get its next state
+ curr_state = (self._curr_state * self._model['class_factor']
+ + byte_class)
+ self._curr_state = self._model['state_table'][curr_state]
+ self._curr_byte_pos += 1
+ return self._curr_state
+
+ def get_current_charlen(self):
+ return self._curr_char_len
+
+ def get_coding_state_machine(self):
+ return self._model['name']
+
+ @property
+ def language(self):
+ return self._model['language']
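next_state() is a pure table walk: look up the byte's class, combine it with the current state via class_factor, and read the successor from state_table. Feeding the ISO-2022-KR designation sequence to the matching model (ISO2022KR_SM_MODEL, defined in escsm.py later in this patch) drives the machine straight into ITS_ME; a small sketch assuming the vendored chardet is importable:

```python
from chardet.codingstatemachine import CodingStateMachine
from chardet.enums import MachineState
from chardet.escsm import ISO2022KR_SM_MODEL

sm = CodingStateMachine(ISO2022KR_SM_MODEL)
state = None
for byte in b'\x1b$)C':  # ESC $ ) C designates the KS X 1001 character set
    state = sm.next_state(byte)
print(state == MachineState.ITS_ME)  # True: no other encoding starts this way
```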
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/compat.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/compat.py
new file mode 100755
index 00000000..ddd74687
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/compat.py
@@ -0,0 +1,34 @@
+######################## BEGIN LICENSE BLOCK ########################
+# Contributor(s):
+# Dan Blanchard
+# Ian Cordasco
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+import sys
+
+
+if sys.version_info < (3, 0):
+ PY2 = True
+ PY3 = False
+ base_str = (str, unicode)
+ text_type = unicode
+else:
+ PY2 = False
+ PY3 = True
+ base_str = (bytes, str)
+ text_type = str
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/cp949prober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/cp949prober.py
new file mode 100755
index 00000000..efd793ab
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/cp949prober.py
@@ -0,0 +1,49 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .chardistribution import EUCKRDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import CP949_SM_MODEL
+
+
+class CP949Prober(MultiByteCharSetProber):
+ def __init__(self):
+ super(CP949Prober, self).__init__()
+ self.coding_sm = CodingStateMachine(CP949_SM_MODEL)
+ # NOTE: CP949 is a superset of EUC-KR, so the two encodings share the
+ # same character frequency distribution.
+ self.distribution_analyzer = EUCKRDistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self):
+ return "CP949"
+
+ @property
+ def language(self):
+ return "Korean"
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/enums.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/enums.py
new file mode 100755
index 00000000..04512072
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/enums.py
@@ -0,0 +1,76 @@
+"""
+All of the Enums that are used throughout the chardet package.
+
+:author: Dan Blanchard (dan.blanchard@gmail.com)
+"""
+
+
+class InputState(object):
+ """
+ This enum represents the different states a universal detector can be in.
+ """
+ PURE_ASCII = 0
+ ESC_ASCII = 1
+ HIGH_BYTE = 2
+
+
+class LanguageFilter(object):
+ """
+ This enum represents the different language filters we can apply to a
+ ``UniversalDetector``.
+ """
+ CHINESE_SIMPLIFIED = 0x01
+ CHINESE_TRADITIONAL = 0x02
+ JAPANESE = 0x04
+ KOREAN = 0x08
+ NON_CJK = 0x10
+ ALL = 0x1F
+ CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL
+ CJK = CHINESE | JAPANESE | KOREAN
+
+
+class ProbingState(object):
+ """
+ This enum represents the different states a prober can be in.
+ """
+ DETECTING = 0
+ FOUND_IT = 1
+ NOT_ME = 2
+
+
+class MachineState(object):
+ """
+ This enum represents the different states a state machine can be in.
+ """
+ START = 0
+ ERROR = 1
+ ITS_ME = 2
+
+
+class SequenceLikelihood(object):
+ """
+ This enum represents the likelihood of a character following the previous one.
+ """
+ NEGATIVE = 0
+ UNLIKELY = 1
+ LIKELY = 2
+ POSITIVE = 3
+
+ @classmethod
+ def get_num_categories(cls):
+ """:returns: The number of likelihood categories in the enum."""
+ return 4
+
+
+class CharacterCategory(object):
+ """
+ This enum represents the different categories language models for
+ ``SingleByteCharsetProber`` put characters into.
+
+ Anything less than CONTROL is considered a letter.
+ """
+ UNDEFINED = 255
+ LINE_BREAK = 254
+ SYMBOL = 253
+ DIGIT = 252
+ CONTROL = 251
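LanguageFilter values are bit flags, so they compose with the | operator, and CHINESE and CJK are simply pre-combined masks. A short sketch of passing a filter through the public API (UniversalDetector accepting lang_filter is part of the wider chardet API, not shown in this patch):

```python
from chardet.enums import LanguageFilter
from chardet.universaldetector import UniversalDetector

# restrict detection to Japanese and Korean encodings only
mask = LanguageFilter.JAPANESE | LanguageFilter.KOREAN
detector = UniversalDetector(lang_filter=mask)

# the composite masks are just ORed flags
assert LanguageFilter.CHINESE == (LanguageFilter.CHINESE_SIMPLIFIED
                                  | LanguageFilter.CHINESE_TRADITIONAL)
```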
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/escprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/escprober.py
new file mode 100755
index 00000000..c70493f2
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/escprober.py
@@ -0,0 +1,101 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .codingstatemachine import CodingStateMachine
+from .enums import LanguageFilter, ProbingState, MachineState
+from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL,
+ ISO2022KR_SM_MODEL)
+
+
+class EscCharSetProber(CharSetProber):
+ """
+ This CharSetProber uses a "code scheme" approach for detecting encodings,
+ whereby easily recognizable escape or shift sequences are relied on to
+ identify these encodings.
+ """
+
+ def __init__(self, lang_filter=None):
+ super(EscCharSetProber, self).__init__(lang_filter=lang_filter)
+ self.coding_sm = []
+ if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED:
+ self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL))
+ self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL))
+ if self.lang_filter & LanguageFilter.JAPANESE:
+ self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL))
+ if self.lang_filter & LanguageFilter.KOREAN:
+ self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL))
+ self.active_sm_count = None
+ self._detected_charset = None
+ self._detected_language = None
+ self._state = None
+ self.reset()
+
+ def reset(self):
+ super(EscCharSetProber, self).reset()
+ for coding_sm in self.coding_sm:
+ if not coding_sm:
+ continue
+ coding_sm.active = True
+ coding_sm.reset()
+ self.active_sm_count = len(self.coding_sm)
+ self._detected_charset = None
+ self._detected_language = None
+
+ @property
+ def charset_name(self):
+ return self._detected_charset
+
+ @property
+ def language(self):
+ return self._detected_language
+
+ def get_confidence(self):
+ if self._detected_charset:
+ return 0.99
+ else:
+ return 0.00
+
+ def feed(self, byte_str):
+ for c in byte_str:
+ for coding_sm in self.coding_sm:
+ if not coding_sm or not coding_sm.active:
+ continue
+ coding_state = coding_sm.next_state(c)
+ if coding_state == MachineState.ERROR:
+ coding_sm.active = False
+ self.active_sm_count -= 1
+ if self.active_sm_count <= 0:
+ self._state = ProbingState.NOT_ME
+ return self.state
+ elif coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ self._detected_charset = coding_sm.get_coding_state_machine()
+ self._detected_language = coding_sm.language
+ return self.state
+
+ return self.state
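Because these escape sequences are unambiguous, a single designation sequence is enough for this prober to commit. A sketch feeding the opening bytes of an ISO-2022-JP stream; note that a lang_filter must be supplied, since __init__ tests it with bitwise and:

```python
from chardet.enums import LanguageFilter, ProbingState
from chardet.escprober import EscCharSetProber

prober = EscCharSetProber(lang_filter=LanguageFilter.ALL)
state = prober.feed(b'\x1b$B')  # ESC $ B designates JIS X 0208
assert state == ProbingState.FOUND_IT
print(prober.charset_name, prober.get_confidence())  # ISO-2022-JP 0.99
```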
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/escsm.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/escsm.py
new file mode 100755
index 00000000..0069523a
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/escsm.py
@@ -0,0 +1,246 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import MachineState
+
+HZ_CLS = (
+1,0,0,0,0,0,0,0, # 00 - 07
+0,0,0,0,0,0,0,0, # 08 - 0f
+0,0,0,0,0,0,0,0, # 10 - 17
+0,0,0,1,0,0,0,0, # 18 - 1f
+0,0,0,0,0,0,0,0, # 20 - 27
+0,0,0,0,0,0,0,0, # 28 - 2f
+0,0,0,0,0,0,0,0, # 30 - 37
+0,0,0,0,0,0,0,0, # 38 - 3f
+0,0,0,0,0,0,0,0, # 40 - 47
+0,0,0,0,0,0,0,0, # 48 - 4f
+0,0,0,0,0,0,0,0, # 50 - 57
+0,0,0,0,0,0,0,0, # 58 - 5f
+0,0,0,0,0,0,0,0, # 60 - 67
+0,0,0,0,0,0,0,0, # 68 - 6f
+0,0,0,0,0,0,0,0, # 70 - 77
+0,0,0,4,0,5,2,0, # 78 - 7f
+1,1,1,1,1,1,1,1, # 80 - 87
+1,1,1,1,1,1,1,1, # 88 - 8f
+1,1,1,1,1,1,1,1, # 90 - 97
+1,1,1,1,1,1,1,1, # 98 - 9f
+1,1,1,1,1,1,1,1, # a0 - a7
+1,1,1,1,1,1,1,1, # a8 - af
+1,1,1,1,1,1,1,1, # b0 - b7
+1,1,1,1,1,1,1,1, # b8 - bf
+1,1,1,1,1,1,1,1, # c0 - c7
+1,1,1,1,1,1,1,1, # c8 - cf
+1,1,1,1,1,1,1,1, # d0 - d7
+1,1,1,1,1,1,1,1, # d8 - df
+1,1,1,1,1,1,1,1, # e0 - e7
+1,1,1,1,1,1,1,1, # e8 - ef
+1,1,1,1,1,1,1,1, # f0 - f7
+1,1,1,1,1,1,1,1, # f8 - ff
+)
+
+HZ_ST = (
+MachineState.START,MachineState.ERROR, 3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START, 4,MachineState.ERROR,# 10-17
+ 5,MachineState.ERROR, 6,MachineState.ERROR, 5, 5, 4,MachineState.ERROR,# 18-1f
+ 4,MachineState.ERROR, 4, 4, 4,MachineState.ERROR, 4,MachineState.ERROR,# 20-27
+ 4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f
+)
+
+HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
+
+HZ_SM_MODEL = {'class_table': HZ_CLS,
+ 'class_factor': 6,
+ 'state_table': HZ_ST,
+ 'char_len_table': HZ_CHAR_LEN_TABLE,
+ 'name': "HZ-GB-2312",
+ 'language': 'Chinese'}
+
+ISO2022CN_CLS = (
+2,0,0,0,0,0,0,0, # 00 - 07
+0,0,0,0,0,0,0,0, # 08 - 0f
+0,0,0,0,0,0,0,0, # 10 - 17
+0,0,0,1,0,0,0,0, # 18 - 1f
+0,0,0,0,0,0,0,0, # 20 - 27
+0,3,0,0,0,0,0,0, # 28 - 2f
+0,0,0,0,0,0,0,0, # 30 - 37
+0,0,0,0,0,0,0,0, # 38 - 3f
+0,0,0,4,0,0,0,0, # 40 - 47
+0,0,0,0,0,0,0,0, # 48 - 4f
+0,0,0,0,0,0,0,0, # 50 - 57
+0,0,0,0,0,0,0,0, # 58 - 5f
+0,0,0,0,0,0,0,0, # 60 - 67
+0,0,0,0,0,0,0,0, # 68 - 6f
+0,0,0,0,0,0,0,0, # 70 - 77
+0,0,0,0,0,0,0,0, # 78 - 7f
+2,2,2,2,2,2,2,2, # 80 - 87
+2,2,2,2,2,2,2,2, # 88 - 8f
+2,2,2,2,2,2,2,2, # 90 - 97
+2,2,2,2,2,2,2,2, # 98 - 9f
+2,2,2,2,2,2,2,2, # a0 - a7
+2,2,2,2,2,2,2,2, # a8 - af
+2,2,2,2,2,2,2,2, # b0 - b7
+2,2,2,2,2,2,2,2, # b8 - bf
+2,2,2,2,2,2,2,2, # c0 - c7
+2,2,2,2,2,2,2,2, # c8 - cf
+2,2,2,2,2,2,2,2, # d0 - d7
+2,2,2,2,2,2,2,2, # d8 - df
+2,2,2,2,2,2,2,2, # e0 - e7
+2,2,2,2,2,2,2,2, # e8 - ef
+2,2,2,2,2,2,2,2, # f0 - f7
+2,2,2,2,2,2,2,2, # f8 - ff
+)
+
+ISO2022CN_ST = (
+MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
+MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
+MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,# 18-1f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27
+ 5, 6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f
+)
+
+ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS,
+ 'class_factor': 9,
+ 'state_table': ISO2022CN_ST,
+ 'char_len_table': ISO2022CN_CHAR_LEN_TABLE,
+ 'name': "ISO-2022-CN",
+ 'language': 'Chinese'}
+
+ISO2022JP_CLS = (
+2,0,0,0,0,0,0,0, # 00 - 07
+0,0,0,0,0,0,2,2, # 08 - 0f
+0,0,0,0,0,0,0,0, # 10 - 17
+0,0,0,1,0,0,0,0, # 18 - 1f
+0,0,0,0,7,0,0,0, # 20 - 27
+3,0,0,0,0,0,0,0, # 28 - 2f
+0,0,0,0,0,0,0,0, # 30 - 37
+0,0,0,0,0,0,0,0, # 38 - 3f
+6,0,4,0,8,0,0,0, # 40 - 47
+0,9,5,0,0,0,0,0, # 48 - 4f
+0,0,0,0,0,0,0,0, # 50 - 57
+0,0,0,0,0,0,0,0, # 58 - 5f
+0,0,0,0,0,0,0,0, # 60 - 67
+0,0,0,0,0,0,0,0, # 68 - 6f
+0,0,0,0,0,0,0,0, # 70 - 77
+0,0,0,0,0,0,0,0, # 78 - 7f
+2,2,2,2,2,2,2,2, # 80 - 87
+2,2,2,2,2,2,2,2, # 88 - 8f
+2,2,2,2,2,2,2,2, # 90 - 97
+2,2,2,2,2,2,2,2, # 98 - 9f
+2,2,2,2,2,2,2,2, # a0 - a7
+2,2,2,2,2,2,2,2, # a8 - af
+2,2,2,2,2,2,2,2, # b0 - b7
+2,2,2,2,2,2,2,2, # b8 - bf
+2,2,2,2,2,2,2,2, # c0 - c7
+2,2,2,2,2,2,2,2, # c8 - cf
+2,2,2,2,2,2,2,2, # d0 - d7
+2,2,2,2,2,2,2,2, # d8 - df
+2,2,2,2,2,2,2,2, # e0 - e7
+2,2,2,2,2,2,2,2, # e8 - ef
+2,2,2,2,2,2,2,2, # f0 - f7
+2,2,2,2,2,2,2,2, # f8 - ff
+)
+
+ISO2022JP_ST = (
+MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
+MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f
+MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 20-27
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47
+)
+
+ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS,
+ 'class_factor': 10,
+ 'state_table': ISO2022JP_ST,
+ 'char_len_table': ISO2022JP_CHAR_LEN_TABLE,
+ 'name': "ISO-2022-JP",
+ 'language': 'Japanese'}
+
+ISO2022KR_CLS = (
+2,0,0,0,0,0,0,0, # 00 - 07
+0,0,0,0,0,0,0,0, # 08 - 0f
+0,0,0,0,0,0,0,0, # 10 - 17
+0,0,0,1,0,0,0,0, # 18 - 1f
+0,0,0,0,3,0,0,0, # 20 - 27
+0,4,0,0,0,0,0,0, # 28 - 2f
+0,0,0,0,0,0,0,0, # 30 - 37
+0,0,0,0,0,0,0,0, # 38 - 3f
+0,0,0,5,0,0,0,0, # 40 - 47
+0,0,0,0,0,0,0,0, # 48 - 4f
+0,0,0,0,0,0,0,0, # 50 - 57
+0,0,0,0,0,0,0,0, # 58 - 5f
+0,0,0,0,0,0,0,0, # 60 - 67
+0,0,0,0,0,0,0,0, # 68 - 6f
+0,0,0,0,0,0,0,0, # 70 - 77
+0,0,0,0,0,0,0,0, # 78 - 7f
+2,2,2,2,2,2,2,2, # 80 - 87
+2,2,2,2,2,2,2,2, # 88 - 8f
+2,2,2,2,2,2,2,2, # 90 - 97
+2,2,2,2,2,2,2,2, # 98 - 9f
+2,2,2,2,2,2,2,2, # a0 - a7
+2,2,2,2,2,2,2,2, # a8 - af
+2,2,2,2,2,2,2,2, # b0 - b7
+2,2,2,2,2,2,2,2, # b8 - bf
+2,2,2,2,2,2,2,2, # c0 - c7
+2,2,2,2,2,2,2,2, # c8 - cf
+2,2,2,2,2,2,2,2, # d0 - d7
+2,2,2,2,2,2,2,2, # d8 - df
+2,2,2,2,2,2,2,2, # e0 - e7
+2,2,2,2,2,2,2,2, # e8 - ef
+2,2,2,2,2,2,2,2, # f0 - f7
+2,2,2,2,2,2,2,2, # f8 - ff
+)
+
+ISO2022KR_ST = (
+MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 10-17
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27
+)
+
+ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
+
+ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS,
+ 'class_factor': 6,
+ 'state_table': ISO2022KR_ST,
+ 'char_len_table': ISO2022KR_CHAR_LEN_TABLE,
+ 'name': "ISO-2022-KR",
+ 'language': 'Korean'}
+
+
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/eucjpprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/eucjpprober.py
new file mode 100755
index 00000000..20ce8f7d
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/eucjpprober.py
@@ -0,0 +1,92 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import ProbingState, MachineState
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import EUCJPDistributionAnalysis
+from .jpcntx import EUCJPContextAnalysis
+from .mbcssm import EUCJP_SM_MODEL
+
+
+class EUCJPProber(MultiByteCharSetProber):
+ def __init__(self):
+ super(EUCJPProber, self).__init__()
+ self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
+ self.distribution_analyzer = EUCJPDistributionAnalysis()
+ self.context_analyzer = EUCJPContextAnalysis()
+ self.reset()
+
+ def reset(self):
+ super(EUCJPProber, self).reset()
+ self.context_analyzer.reset()
+
+ @property
+ def charset_name(self):
+ return "EUC-JP"
+
+ @property
+ def language(self):
+ return "Japanese"
+
+ def feed(self, byte_str):
+ for i in range(len(byte_str)):
+ # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
+ coding_state = self.coding_sm.next_state(byte_str[i])
+ if coding_state == MachineState.ERROR:
+ self.logger.debug('%s %s prober hit error at byte %s',
+ self.charset_name, self.language, i)
+ self._state = ProbingState.NOT_ME
+ break
+ elif coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ elif coding_state == MachineState.START:
+ char_len = self.coding_sm.get_current_charlen()
+ if i == 0:
+ self._last_char[1] = byte_str[0]
+ self.context_analyzer.feed(self._last_char, char_len)
+ self.distribution_analyzer.feed(self._last_char, char_len)
+ else:
+ self.context_analyzer.feed(byte_str[i - 1:i + 1],
+ char_len)
+ self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+ char_len)
+
+ self._last_char[0] = byte_str[-1]
+
+ if self.state == ProbingState.DETECTING:
+ if (self.context_analyzer.got_enough_data() and
+ (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self):
+ context_conf = self.context_analyzer.get_confidence()
+ distrib_conf = self.distribution_analyzer.get_confidence()
+ return max(context_conf, distrib_conf)
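EUCJPProber combines two signals, character distribution and two-character context, and reports the stronger of the two as its confidence. A usage sketch; the confidence for a short sample is indicative only:

```python
from chardet.eucjpprober import EUCJPProber

prober = EUCJPProber()
prober.feed('こんにちは、世界。銀河鉄道の夜。'.encode('euc-jp'))
# get_confidence() is max(context confidence, distribution confidence)
print(prober.charset_name, prober.get_confidence())
```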
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euckrfreq.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euckrfreq.py
new file mode 100755
index 00000000..b68078cb
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euckrfreq.py
@@ -0,0 +1,195 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# Sampled from about 20M of text material, including literature and computer technology
+
+# 128 --> 0.79
+# 256 --> 0.92
+# 512 --> 0.986
+# 1024 --> 0.99944
+# 2048 --> 0.99999
+#
+# Ideal Distribution Ratio = 0.98653 / (1 - 0.98653) = 73.24
+# Random Distribution Ratio = 512 / (2350 - 512) = 0.279
+#
+# Typical Distribution Ratio
+
+EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
+
+EUCKR_TABLE_SIZE = 2352
+
+# Char to FreqOrder table
+EUCKR_CHAR_TO_FREQ_ORDER = (
+ 13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
+1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
+1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
+ 945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
+ 116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
+ 708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
+1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
+ 344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
+ 709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
+1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
+1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
+1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
+1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
+1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
+ 885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
+1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
+1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
+1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
+1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
+ 544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
+1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
+ 119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
+ 893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
+1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
+ 282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
+1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
+ 127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
+ 0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
+1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
+1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
+1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
+1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
+ 269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
+1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
+ 887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
+ 217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
+1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
+1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
+1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
+1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
+1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
+1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
+ 50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
+ 639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
+ 103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
+1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
+ 818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
+1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
+ 423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
+ 532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
+2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
+ 619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
+ 191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
+2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
+2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
+2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
+ 719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
+ 819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
+2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
+ 499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
+1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
+2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
+1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
+2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
+2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
+1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
+ 949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
+2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
+2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
+ 22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
+ 962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
+2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
+1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
+2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
+2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
+2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
+2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
+2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
+2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
+1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
+2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
+2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
+2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
+2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
+2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
+1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
+1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
+2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
+1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
+2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
+1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
+ 295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
+2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
+ 432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
+2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
+ 808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
+2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
+2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
+ 501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
+2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
+1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
+ 425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
+1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
+2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
+1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
+2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
+ 416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
+2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
+1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
+2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
+1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
+2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
+1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
+ 593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
+2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
+2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
+ 644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
+ 915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
+1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
+1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
+ 291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
+2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
+2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
+ 797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
+ 434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
+ 585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
+2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
+ 95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
+ 161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
+2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
+2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
+ 704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
+2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
+1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
+ 249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
+2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
+2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
+2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
+ 3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
+ 202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
+ 974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
+2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
+2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
+2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
+1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
+2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
+ 670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
+)
+
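The ratio figures in the header comment above are plain arithmetic over the sampled coverage numbers; a quick reproduction:

```python
# if the 512 most frequent characters cover 98.653% of sampled EUC-KR text,
# the ideal distribution ratio is covered mass over uncovered mass
coverage_512 = 0.98653
print(round(coverage_512 / (1 - coverage_512), 2))  # 73.24

# a uniform spread over the 2350 mapped characters would score far lower;
# that gap is what the distribution analyzer exploits
print(round(512 / (2350 - 512), 3))  # 0.279
```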
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euckrprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euckrprober.py
new file mode 100755
index 00000000..345a060d
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euckrprober.py
@@ -0,0 +1,47 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import EUCKRDistributionAnalysis
+from .mbcssm import EUCKR_SM_MODEL
+
+
+class EUCKRProber(MultiByteCharSetProber):
+ def __init__(self):
+ super(EUCKRProber, self).__init__()
+ self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
+ self.distribution_analyzer = EUCKRDistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self):
+ return "EUC-KR"
+
+ @property
+ def language(self):
+ return "Korean"
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euctwfreq.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euctwfreq.py
new file mode 100755
index 00000000..ed7a995a
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euctwfreq.py
@@ -0,0 +1,387 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# EUCTW frequency table
+# Converted from Big5 work
+# by Taiwan's Mandarin Promotion Council
+#
+
+# 128 --> 0.42261
+# 256 --> 0.57851
+# 512 --> 0.74851
+# 1024 --> 0.89384
+# 2048 --> 0.97583
+#
+# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98
+# Random Distribution Ration = 512/(5401-512)=0.105
+#
+# Typical Distribution Ratio is about 25% of the ideal one, still much higher than the RDR
+
+EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
+
+# Char-to-FreqOrder table
+EUCTW_TABLE_SIZE = 5376
+
+EUCTW_CHAR_TO_FREQ_ORDER = (
+ 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
+3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
+1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
+ 63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
+3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
+4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
+7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
+ 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
+ 179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
+ 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
+2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
+1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
+3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
+ 706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
+1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
+3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
+2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
+ 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
+3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
+1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
+7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
+ 266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
+7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
+1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
+ 32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
+ 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
+3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
+3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
+ 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
+2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
+2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
+ 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
+ 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
+3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
+1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
+1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
+1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
+2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
+ 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
+4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
+1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
+7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
+2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
+ 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
+ 98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
+ 523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
+ 710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
+7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
+ 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
+1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
+ 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
+ 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
+7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
+1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
+ 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
+3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
+4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
+3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
+ 279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
+ 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
+1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
+4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
+3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
+3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
+2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
+7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
+3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
+7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
+1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
+2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
+1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
+ 78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
+1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
+4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
+3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
+ 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
+ 165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
+ 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
+2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
+7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
+1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
+2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
+1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
+1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
+7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
+7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
+7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
+3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
+4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
+1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
+7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
+2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
+7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
+3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
+3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
+7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
+2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
+7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
+ 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
+4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
+2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
+7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
+3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
+2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
+2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
+ 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
+2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
+1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
+1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
+2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
+1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
+7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
+7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
+2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
+4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
+1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
+7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
+ 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
+4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
+ 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
+2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
+ 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
+1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
+1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
+ 730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
+3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
+3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
+1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
+3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
+7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
+7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
+1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
+2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
+1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
+3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
+2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
+3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
+2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
+4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
+4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
+3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
+ 97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
+3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
+ 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
+3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
+3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
+3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
+1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
+7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
+ 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
+7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
+1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
+ 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
+4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
+3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
+ 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
+2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
+2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
+3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
+1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
+4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
+2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
+1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
+1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
+2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
+3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
+1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
+7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
+1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
+4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
+1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
+ 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
+1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
+3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
+3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
+2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
+1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
+4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
+ 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
+7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
+2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
+3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
+4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
+ 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
+7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
+7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
+1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
+4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
+3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
+2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
+3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
+3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
+2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
+1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
+4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
+3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
+3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
+2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
+4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
+7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
+3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
+2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
+3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
+1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
+2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
+3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
+4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
+2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
+2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
+7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
+1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
+2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
+1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
+3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
+4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
+2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
+3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
+3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
+2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
+4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
+2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
+3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
+4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
+7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
+3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
+ 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
+1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
+4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
+1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
+4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
+7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
+ 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
+7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
+2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
+1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
+1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
+3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
+ 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
+ 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
+ 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
+3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
+2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
+ 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
+7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
+1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
+3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
+7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
+1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
+7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
+4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
+1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
+2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
+2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
+4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
+ 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
+ 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
+3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
+3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
+1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
+2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
+7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
+1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
+1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
+3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
+ 919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
+1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
+4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
+7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
+2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
+3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
+ 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
+1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
+2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
+2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
+7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
+7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
+7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
+2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
+2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
+1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
+4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
+3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
+3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
+4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
+4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
+2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
+2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
+7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
+4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
+7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
+2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
+1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
+3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
+4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
+2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
+ 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
+2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
+1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
+2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
+2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
+4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
+7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
+1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
+3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
+7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
+1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
+8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
+2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
+8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
+2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
+2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
+8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
+8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
+8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
+ 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
+8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
+4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
+3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
+8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
+1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
+8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
+ 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
+1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
+ 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
+4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
+1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
+4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
+1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
+ 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
+3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
+4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
+8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
+ 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
+3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
+ 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
+2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
+)
+
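The ratios quoted in the euctwfreq.py header comment can be re-derived from its coverage figures; a quick sanity check (the 512 and 5401 values are taken from the comment itself):

    coverage_512 = 0.74851                    # top 512 chars cover ~74.85% of text
    ideal_ratio = coverage_512 / (1 - coverage_512)
    random_ratio = 512 / (5401 - 512)         # 5401 characters in the model, per comment
    print(round(ideal_ratio, 2), round(random_ratio, 3))   # 2.98 0.105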
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euctwprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euctwprober.py
new file mode 100755
index 00000000..35669cc4
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/euctwprober.py
@@ -0,0 +1,46 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import EUCTWDistributionAnalysis
+from .mbcssm import EUCTW_SM_MODEL
+
+class EUCTWProber(MultiByteCharSetProber):
+ def __init__(self):
+ super(EUCTWProber, self).__init__()
+ self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
+ self.distribution_analyzer = EUCTWDistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self):
+ return "EUC-TW"
+
+ @property
+ def language(self):
+ return "Taiwan"
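All of these multi-byte probers share one feed loop, inherited from MultiByteCharSetProber: the coding state machine validates byte sequences while the distribution analyzer accumulates frequency evidence about the decoded characters. A condensed paraphrase of that loop (feed_sketch is an illustrative helper, not a drop-in replacement; the real method also buffers the last byte across calls):

    from chardet.enums import MachineState, ProbingState

    def feed_sketch(prober, byte_str):
        # Illustrative paraphrase of MultiByteCharSetProber.feed();
        # buffer-boundary handling via _last_char is omitted here.
        for i in range(len(byte_str)):
            coding_state = prober.coding_sm.next_state(byte_str[i])
            if coding_state == MachineState.ERROR:    # illegal byte sequence
                return ProbingState.NOT_ME
            if coding_state == MachineState.ITS_ME:   # unambiguous signature
                return ProbingState.FOUND_IT
            if coding_state == MachineState.START:    # one full character consumed
                char_len = prober.coding_sm.get_current_charlen()
                prober.distribution_analyzer.feed(byte_str[i - 1:i + 1], char_len)
        return ProbingState.DETECTING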
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/gb2312freq.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/gb2312freq.py
new file mode 100755
index 00000000..697837bd
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/gb2312freq.py
@@ -0,0 +1,283 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# GB2312 most frequently used character table
+#
+# Char-to-FreqOrder table, from hz6763
+# (N most frequent characters --> cumulative coverage -- gain over previous row)
+# 512 --> 0.79 -- 0.79
+# 1024 --> 0.92 -- 0.13
+# 2048 --> 0.98 -- 0.06
+# 6768 --> 1.00 -- 0.02
+#
+# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
+# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
+#
+# Typical Distribution Ratio is about 25% of the ideal one, still much higher than the RDR
+
+GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
+
+GB2312_TABLE_SIZE = 3760
+
+GB2312_CHAR_TO_FREQ_ORDER = (
+1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
+2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
+2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
+ 249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
+1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
+1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
+ 152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
+1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
+2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
+3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
+ 544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
+1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
+ 927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
+2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
+ 360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
+2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
+1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
+3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
+ 198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
+1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
+ 253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
+2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
+1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
+3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
+1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
+2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
+1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
+ 585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
+3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
+3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
+ 252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
+3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
+ 836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
+1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
+3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
+2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
+1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
+ 755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
+1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
+4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
+ 887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
+3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
+3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
+ 509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
+1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
+2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
+1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
+1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
+ 389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
+3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
+3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
+4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
+ 296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
+3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
+1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
+1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
+4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
+ 215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
+ 814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
+3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
+1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
+ 602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
+1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
+2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
+ 930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
+ 432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
+ 396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
+3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
+4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
+3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
+ 750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
+2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
+2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
+2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
+ 776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
+2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
+ 968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
+ 163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
+ 220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
+3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
+2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
+2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
+1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
+ 18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
+2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
+ 90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
+ 286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
+1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
+1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
+ 915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
+ 681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
+1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
+2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
+3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
+2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
+2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
+2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
+3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
+1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
+1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
+2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
+1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
+3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
+1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
+1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
+3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
+ 795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
+2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
+1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
+4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
+1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
+1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
+3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
+1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
+ 47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
+ 504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
+1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
+ 160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
+1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
+1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
+ 744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
+3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
+4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
+3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
+2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
+2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
+1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
+3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
+2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
+1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
+1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
+ 125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
+2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
+2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
+3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
+4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
+3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
+ 180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
+3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
+2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
+1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
+ 259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
+ 774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
+3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
+4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
+2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
+1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
+1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
+ 766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
+1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
+3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
+ 955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
+ 642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
+1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
+ 57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
+1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
+ 193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
+2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
+ 158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
+2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
+2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
+1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
+1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
+2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
+ 819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
+1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
+1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
+2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
+2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
+3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
+1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
+4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
+ 571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
+ 845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
+3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
+1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
+ 470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
+3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
+1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
+4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
+1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
+2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
+1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
+ 498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
+1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
+3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
+ 448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
+2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
+ 136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
+1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
+1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
+1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
+3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
+2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
+3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
+3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
+3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
+ 996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
+2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
+ 786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
+2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
+ 12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
+1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
+ 475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
+ 233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
+1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
+3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
+3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
+1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
+1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
+3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
+2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
+2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
+1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
+3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
+ 451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
+4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
+1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
+2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
+3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
+3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
+1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
+ 768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
+ 391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
+2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
+ 931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
+1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
+ 386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
+1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
+1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
+1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
+1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
+1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
+ 381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
+ 852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, #last 512
+)
+
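The *_TYPICAL_DISTRIBUTION_RATIO constants defined in these frequency modules are consumed by CharDistributionAnalysis (chardet/chardistribution.py) when scoring a candidate encoding. A hedged paraphrase of that scoring rule (confidence_sketch is illustrative; chardet's exact thresholds may differ slightly):

    def confidence_sketch(freq_chars, total_chars, typical_ratio=0.9):
        # Illustrative paraphrase of CharDistributionAnalysis.get_confidence().
        if total_chars <= 0 or freq_chars <= 3:   # too little data to judge
            return 0.01                           # effectively "no"
        if freq_chars == total_chars:
            return 0.99                           # effectively "yes"
        r = freq_chars / ((total_chars - freq_chars) * typical_ratio)
        return min(r, 0.99)

    print(confidence_sketch(900, 1000))           # 0.99: far above the typical ratio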
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/gb2312prober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/gb2312prober.py
new file mode 100755
index 00000000..8446d2dd
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/gb2312prober.py
@@ -0,0 +1,46 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import GB2312DistributionAnalysis
+from .mbcssm import GB2312_SM_MODEL
+
+class GB2312Prober(MultiByteCharSetProber):
+ def __init__(self):
+ super(GB2312Prober, self).__init__()
+ self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
+ self.distribution_analyzer = GB2312DistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self):
+ return "GB2312"
+
+ @property
+ def language(self):
+ return "Chinese"
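In normal use none of these probers is called directly; the package entry point fans the input out to every registered prober and reports the winner. Assuming the vendored copy is on sys.path:

    import chardet

    raw = "你好，世界。这是一个编码检测的例子。".encode("gb2312")
    print(chardet.detect(raw))
    # e.g. {'encoding': 'GB2312', 'confidence': 0.99, 'language': 'Chinese'}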
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/hebrewprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/hebrewprober.py
new file mode 100755
index 00000000..b0e1bf49
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/hebrewprober.py
@@ -0,0 +1,292 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Shy Shalom
+# Portions created by the Initial Developer are Copyright (C) 2005
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState
+
+# This prober doesn't actually recognize a language or a charset.
+# It is a helper prober used by the Hebrew model probers.
+
+### General ideas of the Hebrew charset recognition ###
+#
+# Four main charsets exist in Hebrew:
+# "ISO-8859-8" - Visual Hebrew
+# "windows-1255" - Logical Hebrew
+# "ISO-8859-8-I" - Logical Hebrew
+# "x-mac-hebrew" - ?? Logical Hebrew ??
+#
+# Both "ISO" charsets use a completely identical set of code points, whereas
+# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
+# these code points. windows-1255 defines additional characters in the range
+# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
+# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
+# x-mac-hebrew defines similar additional code points but with a different
+# mapping.
+#
+# As far as an average Hebrew text with no diacritics is concerned, all four
+# charsets are identical with respect to code points: for the main Hebrew
+# alphabet, all four map the same values to all 27 Hebrew letters (including
+# final letters).
+#
+# The dominant difference between these charsets is their directionality.
+# "Visual" directionality means that the text is ordered as if the renderer is
+# not aware of a BIDI rendering algorithm. The renderer sees the text and
+# draws it from left to right. The text itself when ordered naturally is read
+# backwards. A buffer of Visual Hebrew generally looks like so:
+# "[last word of first line spelled backwards] [whole line ordered backwards
+# and spelled backwards] [first word of first line spelled backwards]
+# [end of line] [last word of second line] ... etc' "
+# Punctuation marks, numbers and English text added to visual text are
+# naturally also "visual" and run from left to right.
+#
+# "Logical" directionality means the text is ordered "naturally" according to
+# the order it is read. It is the responsibility of the renderer to display
+# the text from right to left. A BIDI algorithm is used to place general
+# punctuation marks, numbers and English text in the text.
+#
+# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
+# what little evidence I could find, it seems that its general directionality
+# is Logical.
+#
+# To sum up all of the above, the Hebrew probing mechanism knows about two
+# charsets:
+# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
+# backwards while line order is natural. For charset recognition purposes
+# the line order is unimportant (In fact, for this implementation, even
+# word order is unimportant).
+# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
+#
+# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
+# specifically identified.
+# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
+# that contain special punctuation marks or diacritics is displayed with
+# some unconverted characters showing as question marks. This problem might
+# be corrected using another model prober for x-mac-hebrew. Due to the fact
+# that x-mac-hebrew texts are so rare, writing another model prober isn't
+# worth the effort and performance hit.
+#
+#### The Prober ####
+#
+# The prober is divided between two SBCharSetProbers and a HebrewProber,
+# all of which are managed, created, fed data, inquired and deleted by the
+# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
+# fact some kind of Hebrew, Logical or Visual. The final decision about
+# which one it is rests with the HebrewProber, which combines final-letter
+# scores with the scores of the two SBCharSetProbers to produce a final
+# answer.
+#
+# The SBCSGroupProber is responsible for stripping the original text of HTML
+# tags, English characters, numbers, low-ASCII punctuation characters, spaces
+# and new lines. It reduces any sequence of such characters to a single space.
+# The buffer fed to each prober in the SBCS group prober is pure text in
+# high-ASCII.
+# The two SBCharSetProbers (model probers) share the same language model:
+# Win1255Model.
+# The first SBCharSetProber uses the model normally as any other
+# SBCharSetProber does, to recognize windows-1255, upon which this model was
+# built. The second SBCharSetProber is told to make the pair-of-letter
+# lookup in the language model backwards. This in practice exactly simulates
+# a visual Hebrew model using the windows-1255 logical Hebrew model.
+#
+# The HebrewProber does not use any language model. All it does is look for
+# final-letter evidence suggesting the text is either logical Hebrew or
+# visual Hebrew. Considered apart from the model probers, the results of the
+# HebrewProber alone are meaningless. The HebrewProber always returns 0.00
+# as confidence since it never identifies a charset by itself. Instead, the
+# pointer to the HebrewProber is passed to the model probers as a helper
+# "Name Prober". When the group prober receives a positive identification
+# from any prober, it asks for the name of the charset identified. If the
+# prober queried is a Hebrew model prober, the model prober forwards the
+# call to the HebrewProber to make the final decision. In the HebrewProber,
+# the decision is made according to the final-letter scores it maintains
+# and both model probers' scores. The answer is returned in the form of the
+# name of the charset identified, either "windows-1255" or "ISO-8859-8".
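+#
+# A minimal sketch of that wiring (illustrative only -- the exact code lives
+# in sbcsgroupprober.py; the constructor arguments here are an assumption
+# based on the description above):
+#
+#     hebrew_prober = HebrewProber()
+#     logical = SingleByteCharSetProber(Win1255Model, False, hebrew_prober)
+#     visual = SingleByteCharSetProber(Win1255Model, True, hebrew_prober)
+#     hebrew_prober.set_model_probers(logical, visual)
+#
+# The second prober reads letter pairs in reverse, simulating a visual
+# Hebrew model with the logical windows-1255 model.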
+
+class HebrewProber(CharSetProber):
+ # windows-1255 / ISO-8859-8 code points of interest
+ FINAL_KAF = 0xea
+ NORMAL_KAF = 0xeb
+ FINAL_MEM = 0xed
+ NORMAL_MEM = 0xee
+ FINAL_NUN = 0xef
+ NORMAL_NUN = 0xf0
+ FINAL_PE = 0xf3
+ NORMAL_PE = 0xf4
+ FINAL_TSADI = 0xf5
+ NORMAL_TSADI = 0xf6
+
+ # Minimum Visual vs Logical final letter score difference.
+ # If the difference is below this, don't rely solely on the final letter score
+ # distance.
+ MIN_FINAL_CHAR_DISTANCE = 5
+
+ # Minimum Visual vs Logical model score difference.
+ # If the difference is below this, don't rely at all on the model score
+ # distance.
+ MIN_MODEL_DISTANCE = 0.01
+
+ VISUAL_HEBREW_NAME = "ISO-8859-8"
+ LOGICAL_HEBREW_NAME = "windows-1255"
+
+ def __init__(self):
+ super(HebrewProber, self).__init__()
+ self._final_char_logical_score = None
+ self._final_char_visual_score = None
+ self._prev = None
+ self._before_prev = None
+ self._logical_prober = None
+ self._visual_prober = None
+ self.reset()
+
+ def reset(self):
+ self._final_char_logical_score = 0
+ self._final_char_visual_score = 0
+ # The last two characters seen in the previous buffer;
+ # self._prev and self._before_prev are initialized to space in order
+ # to simulate a word delimiter at the beginning of the data
+ self._prev = ' '
+ self._before_prev = ' '
+ # These probers are owned by the group prober.
+
+ def set_model_probers(self, logicalProber, visualProber):
+ self._logical_prober = logicalProber
+ self._visual_prober = visualProber
+
+ def is_final(self, c):
+ return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN,
+ self.FINAL_PE, self.FINAL_TSADI]
+
+ def is_non_final(self, c):
+ # The normal Tsadi is not a good Non-Final letter because of words
+ # like 'lechotet' (to chat) that contain an apostrophe after the
+ # tsadi. This apostrophe is converted to a space in
+ # FilterWithoutEnglishLetters, causing the Non-Final tsadi to appear
+ # at the end of a word even though this is not the case in the
+ # original text.
+ # The letters Pe and Kaf rarely display a similar problem: words like
+ # 'Pop', 'Winamp' and 'Mubarak', for example, legitimately end with a
+ # Non-Final Pe or Kaf. However, the benefit of these letters as
+ # Non-Final letters outweighs the damage, since such words are quite
+ # rare.
+ return c in [self.NORMAL_KAF, self.NORMAL_MEM,
+ self.NORMAL_NUN, self.NORMAL_PE]
+
+ def feed(self, byte_str):
+ # Final letter analysis for logical-visual decision.
+ # Look for evidence that the received buffer is either logical Hebrew
+ # or visual Hebrew.
+ # The following cases are checked:
+ # 1) A word longer than 1 letter, ending with a final letter. This is
+ # an indication that the text is laid out "naturally" since the
+ # final letter really appears at the end. +1 for logical score.
+ # 2) A word longer than 1 letter, ending with a Non-Final letter. In
+ # normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
+ # should not end with the Non-Final form of that letter. Exceptions
+ # to this rule are mentioned above in isNonFinal(). This is an
+ # indication that the text is laid out backwards. +1 for visual
+ # score
+ # 3) A word longer than 1 letter, starting with a final letter. Final
+ # letters should not appear at the beginning of a word. This is an
+ # indication that the text is laid out backwards. +1 for visual
+ # score.
+ #
+ # The visual score and logical score are accumulated throughout the
+ # text and are finally checked against each other in the
+ # charset_name property.
+ # No checking for final letters in the middle of words is done since
+ # that case is not an indication for either Logical or Visual text.
+ #
+ # We automatically filter out all 7-bit characters (replace them with
+ # spaces) so the word boundary detection works properly. [MAP]
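+ #
+ # A worked example of the cases above (illustrative): in logical
+ # Hebrew, a word such as [0xf9, 0xec, 0xe5, 0xed] ("shalom" in
+ # windows-1255) ends with FINAL_MEM (0xed), so the space that follows
+ # triggers case (1) and bumps the logical score; stored visually, the
+ # same word arrives reversed and begins with 0xed, so case (3) fires
+ # and bumps the visual score instead.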
+
+ if self.state == ProbingState.NOT_ME:
+ # Both model probers say it's not them. No reason to continue.
+ return ProbingState.NOT_ME
+
+ byte_str = self.filter_high_byte_only(byte_str)
+
+ for cur in byte_str:
+ if cur == ' ':
+ # We stand on a space - a word just ended
+ if self._before_prev != ' ':
+ # next-to-last char was not a space so self._prev is not a
+ # 1 letter word
+ if self.is_final(self._prev):
+ # case (1) [-2:not space][-1:final letter][cur:space]
+ self._final_char_logical_score += 1
+ elif self.is_non_final(self._prev):
+ # case (2) [-2:not space][-1:Non-Final letter][
+ # cur:space]
+ self._final_char_visual_score += 1
+ else:
+ # Not standing on a space
+ if ((self._before_prev == ' ') and
+ (self.is_final(self._prev)) and (cur != ' ')):
+ # case (3) [-2:space][-1:final letter][cur:not space]
+ self._final_char_visual_score += 1
+ self._before_prev = self._prev
+ self._prev = cur
+
+ # Forever detecting, till the end or until both model probers return
+ # ProbingState.NOT_ME (handled above)
+ return ProbingState.DETECTING
+
+ @property
+ def charset_name(self):
+ # Make the decision: is it Logical or Visual?
+ # If the final letter score distance is dominant enough, rely on it.
+ finalsub = self._final_char_logical_score - self._final_char_visual_score
+ if finalsub >= self.MIN_FINAL_CHAR_DISTANCE:
+ return self.LOGICAL_HEBREW_NAME
+ if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE:
+ return self.VISUAL_HEBREW_NAME
+
+ # It's not dominant enough, try to rely on the model scores instead.
+ modelsub = (self._logical_prober.get_confidence()
+ - self._visual_prober.get_confidence())
+ if modelsub > self.MIN_MODEL_DISTANCE:
+ return self.LOGICAL_HEBREW_NAME
+ if modelsub < -self.MIN_MODEL_DISTANCE:
+ return self.VISUAL_HEBREW_NAME
+
+ # Still no good, back to final letter distance, maybe it'll save the
+ # day.
+ if finalsub < 0.0:
+ return self.VISUAL_HEBREW_NAME
+
+ # (finalsub > 0 - Logical) or (don't know what to do) default to
+ # Logical.
+ return self.LOGICAL_HEBREW_NAME
+
+ @property
+ def language(self):
+ return 'Hebrew'
+
+ @property
+ def state(self):
+ # Remain active as long as any of the model probers are active.
+ if (self._logical_prober.state == ProbingState.NOT_ME) and \
+ (self._visual_prober.state == ProbingState.NOT_ME):
+ return ProbingState.NOT_ME
+ return ProbingState.DETECTING
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/jisfreq.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/jisfreq.py
new file mode 100755
index 00000000..83fc082b
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/jisfreq.py
@@ -0,0 +1,325 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# Sampled from about 20M of text material, including literature and computer
+# technology.
+#
+# Japanese frequency table, applied to both S-JIS and EUC-JP.
+# Entries are sorted by frequency order.
+
+# 128 --> 0.77094
+# 256 --> 0.85710
+# 512 --> 0.92635
+# 1024 --> 0.97130
+# 2048 --> 0.99431
+#
+# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
+# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
+#
+# Typical Distribution Ratio, 25% of IDR
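+# i.e. 12.58 * 0.25 = 3.145, rounded to the 3.0 defined below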
+
+JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
+
+# Char to FreqOrder table
+JIS_TABLE_SIZE = 4368
+
+JIS_CHAR_TO_FREQ_ORDER = (
+ 40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
+3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
+1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
+2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
+2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
+5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
+1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
+5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
+5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
+5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
+5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
+5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
+5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
+1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
+1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
+1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
+2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
+3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
+3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
+ 4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
+ 12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
+1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
+ 109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
+5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
+ 271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
+ 32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
+ 43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
+ 280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
+ 54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
+5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
+5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
+5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
+4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
+5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
+5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
+5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
+5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
+5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
+5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
+5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
+5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
+5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
+3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
+5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
+5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
+5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
+5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
+5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
+5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
+5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
+5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
+5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
+5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
+5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
+5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
+5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
+5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
+5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
+5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
+5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
+5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
+5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
+5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
+5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
+5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
+5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
+5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
+5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
+5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
+5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
+5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
+5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
+5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
+5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
+5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
+5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
+5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
+5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
+5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
+5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
+5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
+6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
+6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
+6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
+6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
+6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
+6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
+6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
+6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
+4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
+ 854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
+ 665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
+1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
+1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
+ 896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
+3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
+3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
+ 804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
+3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
+3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
+ 586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
+2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
+ 277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
+3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
+1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
+ 380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
+1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
+ 850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
+2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
+2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
+2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
+2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
+1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
+1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
+1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
+1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
+2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
+1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
+2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
+1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
+1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
+1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
+1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
+1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
+1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
+ 606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
+ 684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
+1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
+2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
+2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
+2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
+3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
+3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
+ 884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
+3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
+1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
+ 861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
+2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
+1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
+ 576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
+3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
+4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
+2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
+1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
+2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
+1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
+ 385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
+ 178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
+1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
+2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
+2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
+2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
+3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
+1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
+2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
+ 359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
+ 837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
+ 855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
+1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
+2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
+ 633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
+1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
+1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
+ 353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
+1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
+1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
+1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
+ 764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
+2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
+ 278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
+2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
+3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
+2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
+1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
+6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
+1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
+2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
+1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
+ 470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
+ 72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
+3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
+3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
+1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
+1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
+1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
+1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
+ 123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
+ 913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
+2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
+ 900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
+3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
+2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
+ 423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
+1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
+2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
+ 220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
+1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
+ 745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
+4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
+2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
+1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
+ 666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
+1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
+2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
+ 376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
+6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
+1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
+1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
+2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
+3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
+ 914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
+3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
+1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
+ 674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
+1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
+ 199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
+3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
+ 370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
+2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
+ 414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
+4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
+2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
+1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
+1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
+1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
+ 166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
+1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
+3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
+1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
+3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
+ 264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
+ 543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
+ 983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
+2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
+1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
+ 867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
+1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
+ 894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
+1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
+ 530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
+ 839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
+ 480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
+1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
+1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
+2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
+4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
+ 227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
+1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
+ 328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
+1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
+3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
+1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
+2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
+2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
+1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
+1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
+2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
+ 455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
+2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
+1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
+1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
+1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
+1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
+3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
+2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
+2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
+ 575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
+3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
+3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
+1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
+2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
+1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
+2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
+)
+
+
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/jpcntx.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/jpcntx.py
new file mode 100755
index 00000000..20044e4b
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/jpcntx.py
@@ -0,0 +1,233 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+
+# This is the hiragana 2-char sequence table; the number in each cell
+# represents the frequency category of that sequence.
+jp2CharContext = (
+(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
+(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
+(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
+(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
+(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
+(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
+(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
+(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
+(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
+(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
+(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
+(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
+(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
+(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
+(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
+(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
+(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
+(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
+(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
+(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
+(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
+(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
+(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
+(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
+(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
+(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
+(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
+(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
+(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
+(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
+(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
+(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
+(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
+(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
+(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
+(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
+(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
+(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
+(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
+(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
+(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
+(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
+(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
+(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
+(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
+(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
+(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
+(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
+(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
+(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
+(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
+(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
+(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
+(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
+(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
+(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
+(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
+(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
+(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
+(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
+(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
+(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
+(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
+(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
+(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
+(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
+(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
+(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
+(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
+(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
+(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
+(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
+(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
+(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
+)
+
+class JapaneseContextAnalysis(object):
+ NUM_OF_CATEGORY = 6
+ DONT_KNOW = -1
+ ENOUGH_REL_THRESHOLD = 100
+ MAX_REL_THRESHOLD = 1000
+ MINIMUM_DATA_THRESHOLD = 4
+
+ def __init__(self):
+ self._total_rel = None
+ self._rel_sample = None
+ self._need_to_skip_char_num = None
+ self._last_char_order = None
+ self._done = None
+ self.reset()
+
+ def reset(self):
+ self._total_rel = 0 # total number of sequences received
+ # category counters; each integer counts sequences in its category
+ self._rel_sample = [0] * self.NUM_OF_CATEGORY
+ # if last byte in current buffer is not the last byte of a character,
+ # we need to know how many bytes to skip in next buffer
+ self._need_to_skip_char_num = 0
+ self._last_char_order = -1 # The order of previous char
+ # If this flag is set to True, detection is done and conclusion has
+ # been made
+ self._done = False
+
+ def feed(self, byte_str, num_bytes):
+ if self._done:
+ return
+
+ # The buffer we receive is byte oriented, and a character may span
+ # more than one buffer. If the last one or two bytes of the previous
+ # buffer did not complete a character, we record how many bytes are
+ # needed to complete it and skip those bytes here. We could record
+ # those bytes as well and analyse the character once it is complete,
+ # but since one character will not make much difference, simply
+ # skipping it simplifies our logic and improves performance.
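+ # For example, if the previous call ended one byte into a 2-byte
+ # character, it set _need_to_skip_char_num to 1, so this call starts
+ # at i = 1 and skips that character's trailing byte.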
+ i = self._need_to_skip_char_num
+ while i < num_bytes:
+ order, char_len = self.get_order(byte_str[i:i + 2])
+ i += char_len
+ if i > num_bytes:
+ self._need_to_skip_char_num = i - num_bytes
+ self._last_char_order = -1
+ else:
+ if (order != -1) and (self._last_char_order != -1):
+ self._total_rel += 1
+ if self._total_rel > self.MAX_REL_THRESHOLD:
+ self._done = True
+ break
+ self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1
+ self._last_char_order = order
+
+ def got_enough_data(self):
+ return self._total_rel > self.ENOUGH_REL_THRESHOLD
+
+ def get_confidence(self):
+ # This is just one way to calculate confidence. It works well for me.
+ if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
+ return (self._total_rel - self._rel_sample[0]) / self._total_rel
+ else:
+ return self.DONT_KNOW
+
+ def get_order(self, byte_str):
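+ # Base-class stub: subclasses override this to return the hiragana
+ # order and byte length of the character starting at byte_str[0];
+ # here every byte is treated as a 1-byte non-hiragana character.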
+ return -1, 1
+
+class SJISContextAnalysis(JapaneseContextAnalysis):
+ def __init__(self):
+ super(SJISContextAnalysis, self).__init__()
+ self._charset_name = "SHIFT_JIS"
+
+ @property
+ def charset_name(self):
+ return self._charset_name
+
+ def get_order(self, byte_str):
+ if not byte_str:
+ return -1, 1
+ # find out current char's byte length
+ first_char = byte_str[0]
+ if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC):
+ char_len = 2
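+ # lead bytes 0x87 and 0xFA-0xFC belong to Microsoft's CP932
+ # extension of Shift_JIS, so seeing one refines the reported name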
+ if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
+ self._charset_name = "CP932"
+ else:
+ char_len = 1
+
+ # return its order if it is hiragana
+ if len(byte_str) > 1:
+ second_char = byte_str[1]
+ if (first_char == 202) and (0x9F <= second_char <= 0xF1):
+ return second_char - 0x9F, char_len
+
+ return -1, char_len
+
+class EUCJPContextAnalysis(JapaneseContextAnalysis):
+ def get_order(self, byte_str):
+ if not byte_str:
+ return -1, 1
+ # find out current char's byte length
+ first_char = byte_str[0]
+ if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):
+ char_len = 2
+ elif first_char == 0x8F:
+ char_len = 3
+ else:
+ char_len = 1
+
+ # return its order if it is hiragana
+ if len(byte_str) > 1:
+ second_char = byte_str[1]
+ if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):
+ return second_char - 0xA1, char_len
+
+ return -1, char_len
+
+
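For orientation between hunks: the analysers above are normally driven by the Japanese probers elsewhere in this diff, which feed buffers incrementally and poll `got_enough_data()`. A minimal, self-contained sketch of that calling pattern follows; the sample string is illustrative, and the import path will depend on where the vendored package sits.

```python
from chardet.jpcntx import SJISContextAnalysis  # vendored path may differ

analyser = SJISContextAnalysis()
sample = 'これはテストです。'.encode('shift_jis')

# feed() expects the buffer plus its length; indexing bytes in Python 3
# yields ints, which is what get_order() operates on.
analyser.feed(sample, len(sample))

# get_confidence() returns DONT_KNOW (-1) until more than
# MINIMUM_DATA_THRESHOLD adjacent-character pairs have been observed,
# so a short sample like this one will not yet produce a real score.
print(analyser.charset_name, analyser.get_confidence())
```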
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langbulgarianmodel.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langbulgarianmodel.py
new file mode 100755
index 00000000..2aa4fb2e
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langbulgarianmodel.py
@@ -0,0 +1,228 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not exist in any text
+# 254: Carriage Return / Line Feed
+# 253: symbols (punctuation) that do not belong to a word
+# 252: 0 - 9
+
+# Character Mapping Table:
+# This table is modified based on win1251BulgarianCharToOrderMap, so
+# only orders below 64 are guaranteed to be valid.
+
+Latin5_BulgarianCharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
+110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
+253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
+116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
+194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
+210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
+ 81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
+ 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
+ 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
+ 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
+ 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
+ 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
+)
+
+win1251BulgarianCharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
+110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
+253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
+116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
+206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
+221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
+ 88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
+ 73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
+ 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
+ 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
+ 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
+ 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 96.9392%
+# first 1024 sequences: 3.0618%
+# rest sequences: 0.2992%
+# negative sequences: 0.0020%
+BulgarianLangModel = (
+0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
+3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
+0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
+0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
+0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
+1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
+0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
+0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
+2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
+3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
+3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
+1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
+3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
+1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
+2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
+2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
+3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
+1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
+2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
+2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
+3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
+1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
+2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
+2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
+2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
+1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
+2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
+1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
+3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
+1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
+3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
+1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
+2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
+1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
+2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
+1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
+2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
+1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
+3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
+1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
+1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
+2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
+1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
+2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
+1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
+0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
+1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
+2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
+1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
+1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
+0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
+0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
+0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
+2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
+1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
+0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
+0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
+1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
+1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
+1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+)
+
+Latin5BulgarianModel = {
+ 'char_to_order_map': Latin5_BulgarianCharToOrderMap,
+ 'precedence_matrix': BulgarianLangModel,
+ 'typical_positive_ratio': 0.969392,
+ 'keep_english_letter': False,
+ 'charset_name': "ISO-8859-5",
+ 'language': 'Bulgarian',
+}
+
+Win1251BulgarianModel = {
+ 'char_to_order_map': win1251BulgarianCharToOrderMap,
+ 'precedence_matrix': BulgarianLangModel,
+ 'typical_positive_ratio': 0.969392,
+ 'keep_english_letter': False,
+ 'charset_name': "windows-1251",
+ 'language': 'Bulgarian',
+}
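A note on how these tables fit together: the real consumer is chardet's SingleByteCharSetProber, which lives elsewhere in this vendor drop. The sketch below only illustrates the table layout, under the assumptions stated in the comments above: orders 64 and higher are ignored, and the precedence matrix is a flattened 64x64 table of sequence-likelihood categories (3 = frequent, 0 = negative).

```python
SAMPLE_SIZE = 64  # only the 64 most frequent letters participate

def score_sequence_categories(data, model):
    """Tally how many adjacent-letter pairs fall into each category."""
    counts = [0, 0, 0, 0]
    last_order = 255  # sentinel: no previous letter yet
    for byte in data:  # Python 3: iterating bytes yields ints
        order = model['char_to_order_map'][byte]
        if order < SAMPLE_SIZE and last_order < SAMPLE_SIZE:
            category = model['precedence_matrix'][last_order * SAMPLE_SIZE + order]
            counts[category] += 1
        last_order = order
    return counts

# e.g. score_sequence_categories(cyrillic_bytes, Win1251BulgarianModel)
```

Text genuinely in the model's encoding should pile hits into category 3, matching the "first 512 sequences" figure in the stats comment.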
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langcyrillicmodel.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langcyrillicmodel.py
new file mode 100755
index 00000000..e5f9a1fd
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langcyrillicmodel.py
@@ -0,0 +1,333 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# KOI8-R language model
+# Character Mapping Table:
+KOI8R_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
+191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
+207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
+223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
+238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
+ 27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
+ 15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
+ 59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
+ 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
+)
+
+win1251_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
+191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
+207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
+223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
+239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
+ 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
+ 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
+ 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
+ 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
+)
+
+latin5_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
+191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
+207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
+223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
+ 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
+ 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
+ 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
+ 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
+239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
+)
+
+macCyrillic_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
+ 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
+ 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
+191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
+207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
+223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
+239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
+ 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
+ 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
+)
+
+IBM855_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
+191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
+206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
+ 3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
+220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
+230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
+ 8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
+ 43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
+250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
+)
+
+IBM866_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
+ 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
+ 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
+ 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
+191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
+207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
+223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
+ 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
+239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 97.6601%
+# first 1024 sequences: 2.3389%
+# rest sequences: 0.1237%
+# negative sequences: 0.0009%
+RussianLangModel = (
+0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
+3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
+0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
+0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
+1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
+1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
+2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
+1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
+3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
+1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
+2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
+1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
+1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
+1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
+2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
+1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
+3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
+1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
+2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
+1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
+2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
+1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
+1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
+1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
+3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
+2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
+3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
+1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
+1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
+0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
+2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
+1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
+1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
+0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
+1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
+2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
+2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
+1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
+1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
+2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
+1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
+0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
+2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
+1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
+1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
+0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
+0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
+0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
+0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
+0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
+1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
+0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
+2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
+0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
+)
+
+Koi8rModel = {
+ 'char_to_order_map': KOI8R_char_to_order_map,
+ 'precedence_matrix': RussianLangModel,
+ 'typical_positive_ratio': 0.976601,
+ 'keep_english_letter': False,
+ 'charset_name': "KOI8-R",
+ 'language': 'Russian',
+}
+
+Win1251CyrillicModel = {
+ 'char_to_order_map': win1251_char_to_order_map,
+ 'precedence_matrix': RussianLangModel,
+ 'typical_positive_ratio': 0.976601,
+ 'keep_english_letter': False,
+ 'charset_name': "windows-1251",
+ 'language': 'Russian',
+}
+
+Latin5CyrillicModel = {
+ 'char_to_order_map': latin5_char_to_order_map,
+ 'precedence_matrix': RussianLangModel,
+ 'typical_positive_ratio': 0.976601,
+ 'keep_english_letter': False,
+ 'charset_name': "ISO-8859-5",
+ 'language': 'Russian',
+}
+
+MacCyrillicModel = {
+ 'char_to_order_map': macCyrillic_char_to_order_map,
+ 'precedence_matrix': RussianLangModel,
+ 'typical_positive_ratio': 0.976601,
+ 'keep_english_letter': False,
+ 'charset_name': "MacCyrillic",
+ 'language': 'Russian',
+}
+
+Ibm866Model = {
+ 'char_to_order_map': IBM866_char_to_order_map,
+ 'precedence_matrix': RussianLangModel,
+ 'typical_positive_ratio': 0.976601,
+ 'keep_english_letter': False,
+ 'charset_name': "IBM866",
+ 'language': 'Russian',
+}
+
+Ibm855Model = {
+ 'char_to_order_map': IBM855_char_to_order_map,
+ 'precedence_matrix': RussianLangModel,
+ 'typical_positive_ratio': 0.976601,
+ 'keep_english_letter': False,
+ 'charset_name': "IBM855",
+ 'language': 'Russian',
+}
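All six dicts above share RussianLangModel and differ only in their byte-to-order maps, so ranking the candidate Cyrillic encodings reduces to asking which map yields the most high-frequency sequences. A hypothetical comparison loop, reusing the `score_sequence_categories` sketch shown after the Bulgarian file (the helper name is ours, not the library's):

```python
CYRILLIC_MODELS = [Koi8rModel, Win1251CyrillicModel, Latin5CyrillicModel,
                   MacCyrillicModel, Ibm866Model, Ibm855Model]

def best_cyrillic_charset(data):
    """Pick the encoding whose order map produces the most frequent pairs."""
    def positives(model):
        return score_sequence_categories(data, model)[3]
    return max(CYRILLIC_MODELS, key=positives)['charset_name']
```

The real probers also track confidence and bail out early; this only shows why one language model can serve several encodings.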
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langgreekmodel.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langgreekmodel.py
new file mode 100755
index 00000000..53322216
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langgreekmodel.py
@@ -0,0 +1,225 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not exist in any text
+# 254: Carriage Return / Line Feed
+# 253: symbols (punctuation) that do not belong to a word
+# 252: 0 - 9
+
+# Character Mapping Table:
+Latin7_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
+ 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
+253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
+ 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
+253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
+253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
+110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
+ 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
+124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
+ 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
+)
+
+win1253_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
+ 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
+253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
+ 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
+253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
+253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
+110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
+ 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
+124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
+ 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 98.2851%
+# first 1024 sequences: 1.7001%
+# rest sequences: 0.0359%
+# negative sequences: 0.0148%
+GreekLangModel = (
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
+3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
+0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
+2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
+0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
+2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
+2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
+0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
+2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
+0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
+3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
+3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
+2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
+2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
+0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
+0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
+0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
+0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
+0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
+0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
+0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
+0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
+0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
+0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
+0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
+0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
+0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
+0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
+0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
+0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
+0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
+0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
+0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
+0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
+0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
+0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
+0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
+0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
+0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
+0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
+0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
+0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
+0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
+0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
+0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
+0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
+0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
+0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
+0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+)
+
+Latin7GreekModel = {
+ 'char_to_order_map': Latin7_char_to_order_map,
+ 'precedence_matrix': GreekLangModel,
+ 'typical_positive_ratio': 0.982851,
+ 'keep_english_letter': False,
+ 'charset_name': "ISO-8859-7",
+ 'language': 'Greek',
+}
+
+Win1253GreekModel = {
+ 'char_to_order_map': win1253_char_to_order_map,
+ 'precedence_matrix': GreekLangModel,
+ 'typical_positive_ratio': 0.982851,
+ 'keep_english_letter': False,
+ 'charset_name': "windows-1253",
+ 'language': 'Greek',
+}
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langhebrewmodel.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langhebrewmodel.py
new file mode 100755
index 00000000..58f4c875
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langhebrewmodel.py
@@ -0,0 +1,200 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Simon Montagu
+# Portions created by the Initial Developer are Copyright (C) 2005
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+# Shoshannah Forbes - original C code (?)
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not exist in any text
+# 254: Carriage Return / Line Feed
+# 253: symbols (punctuation) that do not belong to a word
+# 252: 0 - 9
+
+# Windows-1255 language model
+# Character Mapping Table:
+WIN1255_CHAR_TO_ORDER_MAP = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
+ 78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
+253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
+ 66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
+124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
+215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
+ 34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
+106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
+ 30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
+238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
+ 9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
+ 12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 98.4004%
+# first 1024 sequences: 1.5981%
+# rest sequences: 0.087%
+# negative sequences: 0.0015%
+HEBREW_LANG_MODEL = (
+0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
+3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
+1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
+1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
+1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
+1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
+1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
+0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
+0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
+1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
+3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
+0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
+0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
+0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
+0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
+0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
+3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
+0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
+0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
+0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
+0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
+0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
+0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
+3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
+0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
+0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
+0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
+1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
+0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
+0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
+0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
+0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
+0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
+0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
+2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
+0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
+0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
+0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
+1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
+0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
+2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
+1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
+2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
+1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
+2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
+0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
+1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
+0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
+)
+
+Win1255HebrewModel = {
+ 'char_to_order_map': WIN1255_CHAR_TO_ORDER_MAP,
+ 'precedence_matrix': HEBREW_LANG_MODEL,
+ 'typical_positive_ratio': 0.984004,
+ 'keep_english_letter': False,
+ 'charset_name': "windows-1255",
+ 'language': 'Hebrew',
+}
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langhungarianmodel.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langhungarianmodel.py
new file mode 100755
index 00000000..bb7c095e
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langhungarianmodel.py
@@ -0,0 +1,225 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not exist in any text
+# 254: Carriage/Return
+# 253: symbol (punctuation) that does not belong to word
+# 252: 0 - 9
+
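+# Roughly how the single-byte prober walks these tables (following
+# sbcharsetprober.py, where SAMPLE_SIZE is 64):
+#
+#     order = char_to_order_map[byte]                    # byte in 0..255
+#     if order < SAMPLE_SIZE and last_order < SAMPLE_SIZE:
+#         seq = precedence_matrix[last_order * SAMPLE_SIZE + order]
+#         # seq likelihood: 3 = very common pair .. 0 = practically unseen
+#     last_order = order
+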
+# Character Mapping Table:
+Latin2_HungarianCharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
+ 46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
+253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
+ 23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
+159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
+175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
+191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
+ 79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
+221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
+232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
+ 82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
+245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
+)
+
+win1250HungarianCharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
+ 46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
+253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
+ 23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
+161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
+177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
+191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
+ 81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
+221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
+232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
+ 84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
+245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 94.7368%
+# first 1024 sequences: 5.2623%
+# rest sequences: 0.8894%
+# negative sequences: 0.0009%
+HungarianLangModel = (
+0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
+3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
+3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
+0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
+3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
+0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
+3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
+3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
+3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
+1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
+1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
+1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
+3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
+2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
+2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
+2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
+2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
+2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
+3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
+2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
+2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
+2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
+1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
+1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
+3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
+1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
+1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
+2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
+2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
+2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
+3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
+2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
+1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
+1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
+2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
+2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
+1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
+1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
+2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
+1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
+1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
+2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
+2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
+2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
+1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
+1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
+1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
+0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
+2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
+2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
+1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
+2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
+1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
+1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
+2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
+2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
+2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
+1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
+2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
+0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
+1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
+0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
+1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
+0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
+2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
+0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
+)
+
+Latin2HungarianModel = {
+ 'char_to_order_map': Latin2_HungarianCharToOrderMap,
+ 'precedence_matrix': HungarianLangModel,
+ 'typical_positive_ratio': 0.947368,
+ 'keep_english_letter': True,
+ 'charset_name': "ISO-8859-2",
+ 'language': 'Hungarian',
+}
+
+Win1250HungarianModel = {
+ 'char_to_order_map': win1250HungarianCharToOrderMap,
+ 'precedence_matrix': HungarianLangModel,
+ 'typical_positive_ratio': 0.947368,
+ 'keep_english_letter': True,
+ 'charset_name': "windows-1250",
+ 'language': 'Hungarian',
+}
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langthaimodel.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langthaimodel.py
new file mode 100755
index 00000000..15f94c2d
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langthaimodel.py
@@ -0,0 +1,199 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not exist in any text
+# 254: Carriage/Return
+# 253: symbol (punctuation) that does not belong to word
+# 252: 0 - 9
+
+# The following results for Thai were collected from a limited sample (1M).
+
+# Character Mapping Table:
+TIS620CharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
+253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
+188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
+253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
+ 96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
+209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
+223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
+236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
+ 49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
+ 45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
+ 22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
+ 11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
+ 68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 92.6386%
+# first 1024 sequences: 7.3177%
+# rest sequences: 1.0230%
+# negative sequences: 0.0436%
+ThaiLangModel = (
+0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
+0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
+3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
+0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
+3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
+3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
+3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
+3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
+3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
+3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
+3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
+2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
+3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
+0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
+0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
+3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
+1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
+3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
+3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
+1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
+0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
+2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
+0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
+3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
+2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
+3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
+0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
+3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
+3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
+2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
+3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
+2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
+3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
+3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
+3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
+3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
+1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
+0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
+0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
+3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
+3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
+1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
+3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
+3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
+0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
+0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
+1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
+1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
+3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
+0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
+0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
+3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
+3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
+0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
+0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
+0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
+0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
+0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
+0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
+0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
+3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
+0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
+0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
+3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
+2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
+0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
+3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
+0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
+2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
+1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
+1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
+1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+)
+
+TIS620ThaiModel = {
+ 'char_to_order_map': TIS620CharToOrderMap,
+ 'precedence_matrix': ThaiLangModel,
+ 'typical_positive_ratio': 0.926386,
+ 'keep_english_letter': False,
+ 'charset_name': "TIS-620",
+ 'language': 'Thai',
+}
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langturkishmodel.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langturkishmodel.py
new file mode 100755
index 00000000..a427a457
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/langturkishmodel.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Özgür Baskın - Turkish Language Model
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not exist in any text
+# 254: Carriage/Return
+# 253: symbol (punctuation) that does not belong to word
+# 252: 0 - 9
+
+# Character Mapping Table:
+Latin5_TurkishCharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+255, 23, 37, 47, 39, 29, 52, 36, 45, 53, 60, 16, 49, 20, 46, 42,
+ 48, 69, 44, 35, 31, 51, 38, 62, 65, 43, 56,255,255,255,255,255,
+255, 1, 21, 28, 12, 2, 18, 27, 25, 3, 24, 10, 5, 13, 4, 15,
+ 26, 64, 7, 8, 9, 14, 32, 57, 58, 11, 22,255,255,255,255,255,
+180,179,178,177,176,175,174,173,172,171,170,169,168,167,166,165,
+164,163,162,161,160,159,101,158,157,156,155,154,153,152,151,106,
+150,149,148,147,146,145,144,100,143,142,141,140,139,138,137,136,
+ 94, 80, 93,135,105,134,133, 63,132,131,130,129,128,127,126,125,
+124,104, 73, 99, 79, 85,123, 54,122, 98, 92,121,120, 91,103,119,
+ 68,118,117, 97,116,115, 50, 90,114,113,112,111, 55, 41, 40, 86,
+ 89, 70, 59, 78, 71, 82, 88, 33, 77, 66, 84, 83,110, 75, 61, 96,
+ 30, 67,109, 74, 87,102, 34, 95, 81,108, 76, 72, 17, 6, 19,107,
+)
+
+TurkishLangModel = (
+3,2,3,3,3,1,3,3,3,3,3,3,3,3,2,1,1,3,3,1,3,3,0,3,3,3,3,3,0,3,1,3,
+3,2,1,0,0,1,1,0,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1,
+3,2,2,3,3,0,3,3,3,3,3,3,3,2,3,1,0,3,3,1,3,3,0,3,3,3,3,3,0,3,0,3,
+3,1,1,0,1,0,1,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,0,1,0,1,
+3,3,2,3,3,0,3,3,3,3,3,3,3,2,3,1,1,3,3,0,3,3,1,2,3,3,3,3,0,3,0,3,
+3,1,1,0,0,0,1,0,0,0,0,1,1,0,1,2,1,0,0,0,1,0,0,0,0,2,0,0,0,0,0,1,
+3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,1,3,3,2,0,3,2,1,2,2,1,3,3,0,0,0,2,
+2,2,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,1,0,0,1,
+3,3,3,2,3,3,1,2,3,3,3,3,3,3,3,1,3,2,1,0,3,2,0,1,2,3,3,2,1,0,0,2,
+2,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2,0,0,0,
+1,0,1,3,3,1,3,3,3,3,3,3,3,1,2,0,0,2,3,0,2,3,0,0,2,2,2,3,0,3,0,1,
+2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,0,3,2,0,2,3,2,3,3,1,0,0,2,
+3,2,0,0,1,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,1,1,1,0,2,0,0,1,
+3,3,3,2,3,3,2,3,3,3,3,2,3,3,3,0,3,3,0,0,2,1,0,0,2,3,2,2,0,0,0,2,
+2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,1,0,2,0,0,1,
+3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,0,1,3,2,1,1,3,2,3,2,1,0,0,2,
+2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,
+3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,2,0,2,3,0,0,2,2,2,2,0,0,0,2,
+3,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0,
+3,3,3,3,3,3,3,2,2,2,2,3,2,3,3,0,3,3,1,1,2,2,0,0,2,2,3,2,0,0,1,3,
+0,3,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,
+3,3,3,2,3,3,3,2,1,2,2,3,2,3,3,0,3,2,0,0,1,1,0,1,1,2,1,2,0,0,0,1,
+0,3,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,0,
+3,3,3,2,3,3,2,3,2,2,2,3,3,3,3,1,3,1,1,0,3,2,1,1,3,3,2,3,1,0,0,1,
+1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,0,1,
+3,2,2,3,3,0,3,3,3,3,3,3,3,2,2,1,0,3,3,1,3,3,0,1,3,3,2,3,0,3,0,3,
+2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+2,2,2,3,3,0,3,3,3,3,3,3,3,3,3,0,0,3,2,0,3,3,0,3,2,3,3,3,0,3,1,3,
+2,0,0,0,0,0,0,0,0,0,0,1,0,1,2,0,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1,
+3,3,3,1,2,3,3,1,0,0,1,0,0,3,3,2,3,0,0,2,0,0,2,0,2,0,0,0,2,0,2,0,
+0,3,1,0,1,0,0,0,2,2,1,0,1,1,2,1,2,2,2,0,2,1,1,0,0,0,2,0,0,0,0,0,
+1,2,1,3,3,0,3,3,3,3,3,2,3,0,0,0,0,2,3,0,2,3,1,0,2,3,1,3,0,3,0,2,
+3,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,1,3,3,2,2,3,2,2,0,1,2,3,0,1,2,1,0,1,0,0,0,1,0,2,2,0,0,0,1,
+1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,
+3,3,3,1,3,3,1,1,3,3,1,1,3,3,1,0,2,1,2,0,2,1,0,0,1,1,2,1,0,0,0,2,
+2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,1,0,2,1,3,0,0,2,0,0,3,3,0,3,0,0,1,0,1,2,0,0,1,1,2,2,0,1,0,
+0,1,2,1,1,0,1,0,1,1,1,1,1,0,1,1,1,2,2,1,2,0,1,0,0,0,0,0,0,1,0,0,
+3,3,3,2,3,2,3,3,0,2,2,2,3,3,3,0,3,0,0,0,2,2,0,1,2,1,1,1,0,0,0,1,
+0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
+3,3,3,3,3,3,2,1,2,2,3,3,3,3,2,0,2,0,0,0,2,2,0,0,2,1,3,3,0,0,1,1,
+1,1,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,
+1,1,2,3,3,0,3,3,3,3,3,3,2,2,0,2,0,2,3,2,3,2,2,2,2,2,2,2,1,3,2,3,
+2,0,2,1,2,2,2,2,1,1,2,2,1,2,2,1,2,0,0,2,1,1,0,2,1,0,0,1,0,0,0,1,
+2,3,3,1,1,1,0,1,1,1,2,3,2,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,
+0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,2,2,2,3,2,3,2,2,1,3,3,3,0,2,1,2,0,2,1,0,0,1,1,1,1,1,0,0,1,
+2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0,
+3,3,3,2,3,3,3,3,3,2,3,1,2,3,3,1,2,0,0,0,0,0,0,0,3,2,1,1,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+3,3,3,2,2,3,3,2,1,1,1,1,1,3,3,0,3,1,0,0,1,1,0,0,3,1,2,1,0,0,0,0,
+0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,
+3,3,3,2,2,3,2,2,2,3,2,1,1,3,3,0,3,0,0,0,0,1,0,0,3,1,1,2,0,0,0,1,
+1,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+1,1,1,3,3,0,3,3,3,3,3,2,2,2,1,2,0,2,1,2,2,1,1,0,1,2,2,2,2,2,2,2,
+0,0,2,1,2,1,2,1,0,1,1,3,1,2,1,1,2,0,0,2,0,1,0,1,0,1,0,0,0,1,0,1,
+3,3,3,1,3,3,3,0,1,1,0,2,2,3,1,0,3,0,0,0,1,0,0,0,1,0,0,1,0,1,0,0,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,2,0,0,2,2,1,0,0,1,0,0,3,3,1,3,0,0,1,1,0,2,0,3,0,0,0,2,0,1,1,
+0,1,2,0,1,2,2,0,2,2,2,2,1,0,2,1,1,0,2,0,2,1,2,0,0,0,0,0,0,0,0,0,
+3,3,3,1,3,2,3,2,0,2,2,2,1,3,2,0,2,1,2,0,1,2,0,0,1,0,2,2,0,0,0,2,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,
+3,3,3,0,3,3,1,1,2,3,1,0,3,2,3,0,3,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,
+1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,3,3,0,3,3,2,3,3,2,2,0,0,0,0,1,2,0,1,3,0,0,0,3,1,1,0,3,0,2,
+2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,1,2,2,1,0,3,1,1,1,1,3,3,2,3,0,0,1,0,1,2,0,2,2,0,2,2,0,2,1,
+0,2,2,1,1,1,1,0,2,1,1,0,1,1,1,1,2,1,2,1,2,0,1,0,1,0,0,0,0,0,0,0,
+3,3,3,0,1,1,3,0,0,1,1,0,0,2,2,0,3,0,0,1,1,0,1,0,0,0,0,0,2,0,0,0,
+0,3,1,0,1,0,1,0,2,0,0,1,0,1,0,1,1,1,2,1,1,0,2,0,0,0,0,0,0,0,0,0,
+3,3,3,0,2,0,2,0,1,1,1,0,0,3,3,0,2,0,0,1,0,0,2,1,1,0,1,0,1,0,1,0,
+0,2,0,1,2,0,2,0,2,1,1,0,1,0,2,1,1,0,2,1,1,0,1,0,0,0,1,1,0,0,0,0,
+3,2,3,0,1,0,0,0,0,0,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,0,2,0,0,0,
+0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,2,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,0,0,2,3,0,0,1,0,1,0,2,3,2,3,0,0,1,3,0,2,1,0,0,0,0,2,0,1,0,
+0,2,1,0,0,1,1,0,2,1,0,0,1,0,0,1,1,0,1,1,2,0,1,0,0,0,0,1,0,0,0,0,
+3,2,2,0,0,1,1,0,0,0,0,0,0,3,1,1,1,0,0,0,0,0,1,0,0,0,0,0,2,0,1,0,
+0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,3,3,0,2,3,2,2,1,2,2,1,1,2,0,1,3,2,2,2,0,0,2,2,0,0,0,1,2,1,
+3,0,2,1,1,0,1,1,1,0,1,2,2,2,1,1,2,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,
+0,1,1,2,3,0,3,3,3,2,2,2,2,1,0,1,0,1,0,1,2,2,0,0,2,2,1,3,1,1,2,1,
+0,0,1,1,2,0,1,1,0,0,1,2,0,2,1,1,2,0,0,1,0,0,0,1,0,1,0,1,0,0,0,0,
+3,3,2,0,0,3,1,0,0,0,0,0,0,3,2,1,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0,
+0,2,1,1,0,0,1,0,1,2,0,0,1,1,0,0,2,1,1,1,1,0,2,0,0,0,0,0,0,0,0,0,
+3,3,2,0,0,1,0,0,0,0,1,0,0,3,3,2,2,0,0,1,0,0,2,0,1,0,0,0,2,0,1,0,
+0,0,1,1,0,0,2,0,2,1,0,0,1,1,2,1,2,0,2,1,2,1,1,1,0,0,1,1,0,0,0,0,
+3,3,2,0,0,2,2,0,0,0,1,1,0,2,2,1,3,1,0,1,0,1,2,0,0,0,0,0,1,0,1,0,
+0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,2,0,0,0,1,0,0,1,0,0,2,3,1,2,0,0,1,0,0,2,0,0,0,1,0,2,0,2,0,
+0,1,1,2,2,1,2,0,2,1,1,0,0,1,1,0,1,1,1,1,2,1,1,0,0,0,0,0,0,0,0,0,
+3,3,3,0,2,1,2,1,0,0,1,1,0,3,3,1,2,0,0,1,0,0,2,0,2,0,1,1,2,0,0,0,
+0,0,1,1,1,1,2,0,1,1,0,1,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
+3,3,3,0,2,2,3,2,0,0,1,0,0,2,3,1,0,0,0,0,0,0,2,0,2,0,0,0,2,0,0,0,
+0,1,1,0,0,0,1,0,0,1,0,1,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,
+3,2,3,0,0,0,0,0,0,0,1,0,0,2,2,2,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0,
+0,0,2,1,1,0,1,0,2,1,1,0,0,1,1,2,1,0,2,0,2,0,1,0,0,0,2,0,0,0,0,0,
+0,0,0,2,2,0,2,1,1,1,1,2,2,0,0,1,0,1,0,0,1,3,0,0,0,0,1,0,0,2,1,0,
+0,0,1,0,1,0,0,0,0,0,2,1,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
+2,0,0,2,3,0,2,3,1,2,2,0,2,0,0,2,0,2,1,1,1,2,1,0,0,1,2,1,1,2,1,0,
+1,0,2,0,1,0,1,1,0,0,2,2,1,2,1,1,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,0,2,1,2,0,0,0,1,0,0,3,2,0,1,0,0,1,0,0,2,0,0,0,1,2,1,0,1,0,
+0,0,0,0,1,0,1,0,0,1,0,0,0,0,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,2,2,0,2,2,1,1,0,1,1,1,1,1,0,0,1,2,1,1,1,0,1,0,0,0,1,1,1,1,
+0,0,2,1,0,1,1,1,0,1,1,2,1,2,1,1,2,0,1,1,2,1,0,2,0,0,0,0,0,0,0,0,
+3,2,2,0,0,2,0,0,0,0,0,0,0,2,2,0,2,0,0,1,0,0,2,0,0,0,0,0,2,0,0,0,
+0,2,1,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,3,2,0,2,2,0,1,1,0,1,0,0,1,0,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0,
+2,0,1,0,1,0,1,1,0,0,1,2,0,1,0,1,1,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,
+2,2,2,0,1,1,0,0,0,1,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,1,2,0,1,0,
+0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,2,1,0,1,1,1,0,0,0,0,1,2,0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
+1,1,2,0,1,0,0,0,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,
+0,0,1,2,2,0,2,1,2,1,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,0,0,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
+2,2,2,0,0,0,1,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,0,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,1,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+)
+
+Latin5TurkishModel = {
+ 'char_to_order_map': Latin5_TurkishCharToOrderMap,
+ 'precedence_matrix': TurkishLangModel,
+ 'typical_positive_ratio': 0.970290,
+ 'keep_english_letter': True,
+ 'charset_name': "ISO-8859-9",
+ 'language': 'Turkish',
+}
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/latin1prober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/latin1prober.py
new file mode 100755
index 00000000..7d1e8c20
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/latin1prober.py
@@ -0,0 +1,145 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState
+
+FREQ_CAT_NUM = 4
+
+UDF = 0 # undefined
+OTH = 1 # other
+ASC = 2 # ascii capital letter
+ASS = 3 # ascii small letter
+ACV = 4 # accent capital vowel
+ACO = 5 # accent capital other
+ASV = 6 # accent small vowel
+ASO = 7 # accent small other
+CLASS_NUM = 8 # total classes
+
+Latin1_CharToClass = (
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
+ OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
+ ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
+ ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
+ ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
+ OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
+ ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
+ ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
+ ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
+ OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
+ OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
+ UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
+ OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
+ ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
+ ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
+ ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
+ ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
+ ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
+ ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
+ ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
+ ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
+)
+
+# 0 : illegal
+# 1 : very unlikely
+# 2 : normal
+# 3 : very likely
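+#
+# Latin1ClassModel is a flattened 8x8 transition matrix: the row is the class
+# of the previous character, the column the class of the current one, so
+# feed() below indexes it as (last_char_class * CLASS_NUM) + char_class.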
+Latin1ClassModel = (
+# UDF OTH ASC ASS ACV ACO ASV ASO
+ 0, 0, 0, 0, 0, 0, 0, 0, # UDF
+ 0, 3, 3, 3, 3, 3, 3, 3, # OTH
+ 0, 3, 3, 3, 3, 3, 3, 3, # ASC
+ 0, 3, 3, 3, 1, 1, 3, 3, # ASS
+ 0, 3, 3, 3, 1, 2, 1, 2, # ACV
+ 0, 3, 3, 3, 3, 3, 3, 3, # ACO
+ 0, 3, 1, 3, 1, 1, 1, 3, # ASV
+ 0, 3, 1, 3, 1, 1, 3, 3, # ASO
+)
+
+
+class Latin1Prober(CharSetProber):
+ def __init__(self):
+ super(Latin1Prober, self).__init__()
+ self._last_char_class = None
+ self._freq_counter = None
+ self.reset()
+
+ def reset(self):
+ self._last_char_class = OTH
+ self._freq_counter = [0] * FREQ_CAT_NUM
+ CharSetProber.reset(self)
+
+ @property
+ def charset_name(self):
+ return "ISO-8859-1"
+
+ @property
+ def language(self):
+ return ""
+
+ def feed(self, byte_str):
+ byte_str = self.filter_with_english_letters(byte_str)
+ for c in byte_str:
+ char_class = Latin1_CharToClass[c]
+ freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM)
+ + char_class]
+ if freq == 0:
+ self._state = ProbingState.NOT_ME
+ break
+ self._freq_counter[freq] += 1
+ self._last_char_class = char_class
+
+ return self.state
+
+ def get_confidence(self):
+ if self.state == ProbingState.NOT_ME:
+ return 0.01
+
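+        # _freq_counter is indexed by the 0-3 frequency categories defined
+        # above; confidence is the share of "very likely" transitions, with a
+        # heavy (20x) penalty for every "very unlikely" one.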
+ total = sum(self._freq_counter)
+ if total < 0.01:
+ confidence = 0.0
+ else:
+ confidence = ((self._freq_counter[3] - self._freq_counter[1] * 20.0)
+ / total)
+ if confidence < 0.0:
+ confidence = 0.0
+        # Lower the confidence of Latin-1 so that other, more accurate
+        # detectors can take priority.
+ confidence = confidence * 0.73
+ return confidence
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/mbcharsetprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/mbcharsetprober.py
new file mode 100755
index 00000000..6256ecfd
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/mbcharsetprober.py
@@ -0,0 +1,91 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+# Proofpoint, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState, MachineState
+
+
+class MultiByteCharSetProber(CharSetProber):
+ """
+ MultiByteCharSetProber
+ """
+
+ def __init__(self, lang_filter=None):
+ super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
+ self.distribution_analyzer = None
+ self.coding_sm = None
+ self._last_char = [0, 0]
+
+ def reset(self):
+ super(MultiByteCharSetProber, self).reset()
+ if self.coding_sm:
+ self.coding_sm.reset()
+ if self.distribution_analyzer:
+ self.distribution_analyzer.reset()
+ self._last_char = [0, 0]
+
+ @property
+ def charset_name(self):
+ raise NotImplementedError
+
+ @property
+ def language(self):
+ raise NotImplementedError
+
+ def feed(self, byte_str):
+ for i in range(len(byte_str)):
+ coding_state = self.coding_sm.next_state(byte_str[i])
+ if coding_state == MachineState.ERROR:
+ self.logger.debug('%s %s prober hit error at byte %s',
+ self.charset_name, self.language, i)
+ self._state = ProbingState.NOT_ME
+ break
+ elif coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ elif coding_state == MachineState.START:
+ char_len = self.coding_sm.get_current_charlen()
+ if i == 0:
+ self._last_char[1] = byte_str[0]
+ self.distribution_analyzer.feed(self._last_char, char_len)
+ else:
+ self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+ char_len)
+
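+        # Remember the final byte so that a character split across two feed()
+        # calls can still be handed to the distribution analyzer next time.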
+ self._last_char[0] = byte_str[-1]
+
+ if self.state == ProbingState.DETECTING:
+ if (self.distribution_analyzer.got_enough_data() and
+ (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self):
+ return self.distribution_analyzer.get_confidence()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/mbcsgroupprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/mbcsgroupprober.py
new file mode 100755
index 00000000..530abe75
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/mbcsgroupprober.py
@@ -0,0 +1,54 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+# Proofpoint, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetgroupprober import CharSetGroupProber
+from .utf8prober import UTF8Prober
+from .sjisprober import SJISProber
+from .eucjpprober import EUCJPProber
+from .gb2312prober import GB2312Prober
+from .euckrprober import EUCKRProber
+from .cp949prober import CP949Prober
+from .big5prober import Big5Prober
+from .euctwprober import EUCTWProber
+
+
+class MBCSGroupProber(CharSetGroupProber):
+ def __init__(self, lang_filter=None):
+ super(MBCSGroupProber, self).__init__(lang_filter=lang_filter)
+ self.probers = [
+ UTF8Prober(),
+ SJISProber(),
+ EUCJPProber(),
+ GB2312Prober(),
+ EUCKRProber(),
+ CP949Prober(),
+ Big5Prober(),
+ EUCTWProber()
+ ]
+ self.reset()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/mbcssm.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/mbcssm.py
new file mode 100755
index 00000000..8360d0f2
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/mbcssm.py
@@ -0,0 +1,572 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import MachineState
+
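+# Each *_SM_MODEL below drives chardet's CodingStateMachine (see
+# codingstatemachine.py): an input byte is first mapped to a small class via
+# 'class_table', then the next state is looked up in the flattened
+# 'state_table' as
+#
+#     state_table[current_state * class_factor + byte_class]
+#
+# 'char_len_table' gives, per byte class, the expected total length in bytes
+# of a character whose first byte falls in that class.
+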
+# BIG5
+
+BIG5_CLS = (
+ 1,1,1,1,1,1,1,1, # 00 - 07  # allow 0x00 as a legal value
+ 1,1,1,1,1,1,0,0, # 08 - 0f
+ 1,1,1,1,1,1,1,1, # 10 - 17
+ 1,1,1,0,1,1,1,1, # 18 - 1f
+ 1,1,1,1,1,1,1,1, # 20 - 27
+ 1,1,1,1,1,1,1,1, # 28 - 2f
+ 1,1,1,1,1,1,1,1, # 30 - 37
+ 1,1,1,1,1,1,1,1, # 38 - 3f
+ 2,2,2,2,2,2,2,2, # 40 - 47
+ 2,2,2,2,2,2,2,2, # 48 - 4f
+ 2,2,2,2,2,2,2,2, # 50 - 57
+ 2,2,2,2,2,2,2,2, # 58 - 5f
+ 2,2,2,2,2,2,2,2, # 60 - 67
+ 2,2,2,2,2,2,2,2, # 68 - 6f
+ 2,2,2,2,2,2,2,2, # 70 - 77
+ 2,2,2,2,2,2,2,1, # 78 - 7f
+ 4,4,4,4,4,4,4,4, # 80 - 87
+ 4,4,4,4,4,4,4,4, # 88 - 8f
+ 4,4,4,4,4,4,4,4, # 90 - 97
+ 4,4,4,4,4,4,4,4, # 98 - 9f
+ 4,3,3,3,3,3,3,3, # a0 - a7
+ 3,3,3,3,3,3,3,3, # a8 - af
+ 3,3,3,3,3,3,3,3, # b0 - b7
+ 3,3,3,3,3,3,3,3, # b8 - bf
+ 3,3,3,3,3,3,3,3, # c0 - c7
+ 3,3,3,3,3,3,3,3, # c8 - cf
+ 3,3,3,3,3,3,3,3, # d0 - d7
+ 3,3,3,3,3,3,3,3, # d8 - df
+ 3,3,3,3,3,3,3,3, # e0 - e7
+ 3,3,3,3,3,3,3,3, # e8 - ef
+ 3,3,3,3,3,3,3,3, # f0 - f7
+ 3,3,3,3,3,3,3,0 # f8 - ff
+)
+
+BIG5_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
+)
+
+BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)
+
+BIG5_SM_MODEL = {'class_table': BIG5_CLS,
+ 'class_factor': 5,
+ 'state_table': BIG5_ST,
+ 'char_len_table': BIG5_CHAR_LEN_TABLE,
+ 'name': 'Big5'}
+
+# CP949
+
+CP949_CLS = (
+ 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
+ 1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
+ 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
+ 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
+ 1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
+ 4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
+ 1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
+ 5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
+ 0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
+ 6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
+ 6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
+ 7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
+ 7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
+ 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
+ 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
+ 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
+)
+
+CP949_ST = (
+#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
+ MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
+)
+
+CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
+
+CP949_SM_MODEL = {'class_table': CP949_CLS,
+ 'class_factor': 10,
+ 'state_table': CP949_ST,
+ 'char_len_table': CP949_CHAR_LEN_TABLE,
+ 'name': 'CP949'}
+
+# EUC-JP
+
+EUCJP_CLS = (
+ 4,4,4,4,4,4,4,4, # 00 - 07
+ 4,4,4,4,4,4,5,5, # 08 - 0f
+ 4,4,4,4,4,4,4,4, # 10 - 17
+ 4,4,4,5,4,4,4,4, # 18 - 1f
+ 4,4,4,4,4,4,4,4, # 20 - 27
+ 4,4,4,4,4,4,4,4, # 28 - 2f
+ 4,4,4,4,4,4,4,4, # 30 - 37
+ 4,4,4,4,4,4,4,4, # 38 - 3f
+ 4,4,4,4,4,4,4,4, # 40 - 47
+ 4,4,4,4,4,4,4,4, # 48 - 4f
+ 4,4,4,4,4,4,4,4, # 50 - 57
+ 4,4,4,4,4,4,4,4, # 58 - 5f
+ 4,4,4,4,4,4,4,4, # 60 - 67
+ 4,4,4,4,4,4,4,4, # 68 - 6f
+ 4,4,4,4,4,4,4,4, # 70 - 77
+ 4,4,4,4,4,4,4,4, # 78 - 7f
+ 5,5,5,5,5,5,5,5, # 80 - 87
+ 5,5,5,5,5,5,1,3, # 88 - 8f
+ 5,5,5,5,5,5,5,5, # 90 - 97
+ 5,5,5,5,5,5,5,5, # 98 - 9f
+ 5,2,2,2,2,2,2,2, # a0 - a7
+ 2,2,2,2,2,2,2,2, # a8 - af
+ 2,2,2,2,2,2,2,2, # b0 - b7
+ 2,2,2,2,2,2,2,2, # b8 - bf
+ 2,2,2,2,2,2,2,2, # c0 - c7
+ 2,2,2,2,2,2,2,2, # c8 - cf
+ 2,2,2,2,2,2,2,2, # d0 - d7
+ 2,2,2,2,2,2,2,2, # d8 - df
+ 0,0,0,0,0,0,0,0, # e0 - e7
+ 0,0,0,0,0,0,0,0, # e8 - ef
+ 0,0,0,0,0,0,0,0, # f0 - f7
+ 0,0,0,0,0,0,0,5 # f8 - ff
+)
+
+EUCJP_ST = (
+ 3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f
+ 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
+)
+
+EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)
+
+EUCJP_SM_MODEL = {'class_table': EUCJP_CLS,
+ 'class_factor': 6,
+ 'state_table': EUCJP_ST,
+ 'char_len_table': EUCJP_CHAR_LEN_TABLE,
+ 'name': 'EUC-JP'}
+
+# EUC-KR
+
+EUCKR_CLS = (
+ 1,1,1,1,1,1,1,1, # 00 - 07
+ 1,1,1,1,1,1,0,0, # 08 - 0f
+ 1,1,1,1,1,1,1,1, # 10 - 17
+ 1,1,1,0,1,1,1,1, # 18 - 1f
+ 1,1,1,1,1,1,1,1, # 20 - 27
+ 1,1,1,1,1,1,1,1, # 28 - 2f
+ 1,1,1,1,1,1,1,1, # 30 - 37
+ 1,1,1,1,1,1,1,1, # 38 - 3f
+ 1,1,1,1,1,1,1,1, # 40 - 47
+ 1,1,1,1,1,1,1,1, # 48 - 4f
+ 1,1,1,1,1,1,1,1, # 50 - 57
+ 1,1,1,1,1,1,1,1, # 58 - 5f
+ 1,1,1,1,1,1,1,1, # 60 - 67
+ 1,1,1,1,1,1,1,1, # 68 - 6f
+ 1,1,1,1,1,1,1,1, # 70 - 77
+ 1,1,1,1,1,1,1,1, # 78 - 7f
+ 0,0,0,0,0,0,0,0, # 80 - 87
+ 0,0,0,0,0,0,0,0, # 88 - 8f
+ 0,0,0,0,0,0,0,0, # 90 - 97
+ 0,0,0,0,0,0,0,0, # 98 - 9f
+ 0,2,2,2,2,2,2,2, # a0 - a7
+ 2,2,2,2,2,3,3,3, # a8 - af
+ 2,2,2,2,2,2,2,2, # b0 - b7
+ 2,2,2,2,2,2,2,2, # b8 - bf
+ 2,2,2,2,2,2,2,2, # c0 - c7
+ 2,3,2,2,2,2,2,2, # c8 - cf
+ 2,2,2,2,2,2,2,2, # d0 - d7
+ 2,2,2,2,2,2,2,2, # d8 - df
+ 2,2,2,2,2,2,2,2, # e0 - e7
+ 2,2,2,2,2,2,2,2, # e8 - ef
+ 2,2,2,2,2,2,2,2, # f0 - f7
+ 2,2,2,2,2,2,2,0 # f8 - ff
+)
+
+EUCKR_ST = (
+ MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
+)
+
+EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
+
+EUCKR_SM_MODEL = {'class_table': EUCKR_CLS,
+ 'class_factor': 4,
+ 'state_table': EUCKR_ST,
+ 'char_len_table': EUCKR_CHAR_LEN_TABLE,
+ 'name': 'EUC-KR'}
+
+# EUC-TW
+
+EUCTW_CLS = (
+ 2,2,2,2,2,2,2,2, # 00 - 07
+ 2,2,2,2,2,2,0,0, # 08 - 0f
+ 2,2,2,2,2,2,2,2, # 10 - 17
+ 2,2,2,0,2,2,2,2, # 18 - 1f
+ 2,2,2,2,2,2,2,2, # 20 - 27
+ 2,2,2,2,2,2,2,2, # 28 - 2f
+ 2,2,2,2,2,2,2,2, # 30 - 37
+ 2,2,2,2,2,2,2,2, # 38 - 3f
+ 2,2,2,2,2,2,2,2, # 40 - 47
+ 2,2,2,2,2,2,2,2, # 48 - 4f
+ 2,2,2,2,2,2,2,2, # 50 - 57
+ 2,2,2,2,2,2,2,2, # 58 - 5f
+ 2,2,2,2,2,2,2,2, # 60 - 67
+ 2,2,2,2,2,2,2,2, # 68 - 6f
+ 2,2,2,2,2,2,2,2, # 70 - 77
+ 2,2,2,2,2,2,2,2, # 78 - 7f
+ 0,0,0,0,0,0,0,0, # 80 - 87
+ 0,0,0,0,0,0,6,0, # 88 - 8f
+ 0,0,0,0,0,0,0,0, # 90 - 97
+ 0,0,0,0,0,0,0,0, # 98 - 9f
+ 0,3,4,4,4,4,4,4, # a0 - a7
+ 5,5,1,1,1,1,1,1, # a8 - af
+ 1,1,1,1,1,1,1,1, # b0 - b7
+ 1,1,1,1,1,1,1,1, # b8 - bf
+ 1,1,3,1,3,3,3,3, # c0 - c7
+ 3,3,3,3,3,3,3,3, # c8 - cf
+ 3,3,3,3,3,3,3,3, # d0 - d7
+ 3,3,3,3,3,3,3,3, # d8 - df
+ 3,3,3,3,3,3,3,3, # e0 - e7
+ 3,3,3,3,3,3,3,3, # e8 - ef
+ 3,3,3,3,3,3,3,3, # f0 - f7
+ 3,3,3,3,3,3,3,0 # f8 - ff
+)
+
+EUCTW_ST = (
+ MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17
+ MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+ 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
+ MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
+)
+
+EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
+
+EUCTW_SM_MODEL = {'class_table': EUCTW_CLS,
+ 'class_factor': 7,
+ 'state_table': EUCTW_ST,
+ 'char_len_table': EUCTW_CHAR_LEN_TABLE,
+ 'name': 'x-euc-tw'}
+
+# GB2312
+
+GB2312_CLS = (
+ 1,1,1,1,1,1,1,1, # 00 - 07
+ 1,1,1,1,1,1,0,0, # 08 - 0f
+ 1,1,1,1,1,1,1,1, # 10 - 17
+ 1,1,1,0,1,1,1,1, # 18 - 1f
+ 1,1,1,1,1,1,1,1, # 20 - 27
+ 1,1,1,1,1,1,1,1, # 28 - 2f
+ 3,3,3,3,3,3,3,3, # 30 - 37
+ 3,3,1,1,1,1,1,1, # 38 - 3f
+ 2,2,2,2,2,2,2,2, # 40 - 47
+ 2,2,2,2,2,2,2,2, # 48 - 4f
+ 2,2,2,2,2,2,2,2, # 50 - 57
+ 2,2,2,2,2,2,2,2, # 58 - 5f
+ 2,2,2,2,2,2,2,2, # 60 - 67
+ 2,2,2,2,2,2,2,2, # 68 - 6f
+ 2,2,2,2,2,2,2,2, # 70 - 77
+ 2,2,2,2,2,2,2,4, # 78 - 7f
+ 5,6,6,6,6,6,6,6, # 80 - 87
+ 6,6,6,6,6,6,6,6, # 88 - 8f
+ 6,6,6,6,6,6,6,6, # 90 - 97
+ 6,6,6,6,6,6,6,6, # 98 - 9f
+ 6,6,6,6,6,6,6,6, # a0 - a7
+ 6,6,6,6,6,6,6,6, # a8 - af
+ 6,6,6,6,6,6,6,6, # b0 - b7
+ 6,6,6,6,6,6,6,6, # b8 - bf
+ 6,6,6,6,6,6,6,6, # c0 - c7
+ 6,6,6,6,6,6,6,6, # c8 - cf
+ 6,6,6,6,6,6,6,6, # d0 - d7
+ 6,6,6,6,6,6,6,6, # d8 - df
+ 6,6,6,6,6,6,6,6, # e0 - e7
+ 6,6,6,6,6,6,6,6, # e8 - ef
+ 6,6,6,6,6,6,6,6, # f0 - f7
+ 6,6,6,6,6,6,6,0 # f8 - ff
+)
+
+GB2312_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17
+ 4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+ MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
+)
+
+# To be accurate, the length of class 6 can be either 2 or 4.
+# But it is not necessary to discriminate between the two since
+# it is used for frequency analysis only, and we are validating
+# each code range there as well. So it is safe to set it to be
+# 2 here.
+GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
+
+GB2312_SM_MODEL = {'class_table': GB2312_CLS,
+ 'class_factor': 7,
+ 'state_table': GB2312_ST,
+ 'char_len_table': GB2312_CHAR_LEN_TABLE,
+ 'name': 'GB2312'}
+
+# Shift_JIS
+
+SJIS_CLS = (
+ 1,1,1,1,1,1,1,1, # 00 - 07
+ 1,1,1,1,1,1,0,0, # 08 - 0f
+ 1,1,1,1,1,1,1,1, # 10 - 17
+ 1,1,1,0,1,1,1,1, # 18 - 1f
+ 1,1,1,1,1,1,1,1, # 20 - 27
+ 1,1,1,1,1,1,1,1, # 28 - 2f
+ 1,1,1,1,1,1,1,1, # 30 - 37
+ 1,1,1,1,1,1,1,1, # 38 - 3f
+ 2,2,2,2,2,2,2,2, # 40 - 47
+ 2,2,2,2,2,2,2,2, # 48 - 4f
+ 2,2,2,2,2,2,2,2, # 50 - 57
+ 2,2,2,2,2,2,2,2, # 58 - 5f
+ 2,2,2,2,2,2,2,2, # 60 - 67
+ 2,2,2,2,2,2,2,2, # 68 - 6f
+ 2,2,2,2,2,2,2,2, # 70 - 77
+ 2,2,2,2,2,2,2,1, # 78 - 7f
+ 3,3,3,3,3,2,2,3, # 80 - 87
+ 3,3,3,3,3,3,3,3, # 88 - 8f
+ 3,3,3,3,3,3,3,3, # 90 - 97
+ 3,3,3,3,3,3,3,3, # 98 - 9f
+ # 0xa0 is illegal in Shift_JIS encoding, but some pages do
+ # contain such bytes, so we need to be more forgiving of errors.
+ 2,2,2,2,2,2,2,2, # a0 - a7
+ 2,2,2,2,2,2,2,2, # a8 - af
+ 2,2,2,2,2,2,2,2, # b0 - b7
+ 2,2,2,2,2,2,2,2, # b8 - bf
+ 2,2,2,2,2,2,2,2, # c0 - c7
+ 2,2,2,2,2,2,2,2, # c8 - cf
+ 2,2,2,2,2,2,2,2, # d0 - d7
+ 2,2,2,2,2,2,2,2, # d8 - df
+ 3,3,3,3,3,3,3,3, # e0 - e7
+ 3,3,3,3,3,4,4,4, # e8 - ef
+ 3,3,3,3,3,3,3,3, # f0 - f7
+ 3,3,3,3,3,0,0,0 # f8 - ff
+)
+
+SJIS_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
+)
+
+SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
+
+SJIS_SM_MODEL = {'class_table': SJIS_CLS,
+ 'class_factor': 6,
+ 'state_table': SJIS_ST,
+ 'char_len_table': SJIS_CHAR_LEN_TABLE,
+ 'name': 'Shift_JIS'}
+
+# UCS2-BE
+
+UCS2BE_CLS = (
+ 0,0,0,0,0,0,0,0, # 00 - 07
+ 0,0,1,0,0,2,0,0, # 08 - 0f
+ 0,0,0,0,0,0,0,0, # 10 - 17
+ 0,0,0,3,0,0,0,0, # 18 - 1f
+ 0,0,0,0,0,0,0,0, # 20 - 27
+ 0,3,3,3,3,3,0,0, # 28 - 2f
+ 0,0,0,0,0,0,0,0, # 30 - 37
+ 0,0,0,0,0,0,0,0, # 38 - 3f
+ 0,0,0,0,0,0,0,0, # 40 - 47
+ 0,0,0,0,0,0,0,0, # 48 - 4f
+ 0,0,0,0,0,0,0,0, # 50 - 57
+ 0,0,0,0,0,0,0,0, # 58 - 5f
+ 0,0,0,0,0,0,0,0, # 60 - 67
+ 0,0,0,0,0,0,0,0, # 68 - 6f
+ 0,0,0,0,0,0,0,0, # 70 - 77
+ 0,0,0,0,0,0,0,0, # 78 - 7f
+ 0,0,0,0,0,0,0,0, # 80 - 87
+ 0,0,0,0,0,0,0,0, # 88 - 8f
+ 0,0,0,0,0,0,0,0, # 90 - 97
+ 0,0,0,0,0,0,0,0, # 98 - 9f
+ 0,0,0,0,0,0,0,0, # a0 - a7
+ 0,0,0,0,0,0,0,0, # a8 - af
+ 0,0,0,0,0,0,0,0, # b0 - b7
+ 0,0,0,0,0,0,0,0, # b8 - bf
+ 0,0,0,0,0,0,0,0, # c0 - c7
+ 0,0,0,0,0,0,0,0, # c8 - cf
+ 0,0,0,0,0,0,0,0, # d0 - d7
+ 0,0,0,0,0,0,0,0, # d8 - df
+ 0,0,0,0,0,0,0,0, # e0 - e7
+ 0,0,0,0,0,0,0,0, # e8 - ef
+ 0,0,0,0,0,0,0,0, # f0 - f7
+ 0,0,0,0,0,0,4,5 # f8 - ff
+)
+
+UCS2BE_ST = (
+ 5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17
+ 6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f
+ 6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27
+ 5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f
+ 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
+)
+
+UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
+
+UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS,
+ 'class_factor': 6,
+ 'state_table': UCS2BE_ST,
+ 'char_len_table': UCS2BE_CHAR_LEN_TABLE,
+ 'name': 'UTF-16BE'}
+
+# UCS2-LE
+
+UCS2LE_CLS = (
+ 0,0,0,0,0,0,0,0, # 00 - 07
+ 0,0,1,0,0,2,0,0, # 08 - 0f
+ 0,0,0,0,0,0,0,0, # 10 - 17
+ 0,0,0,3,0,0,0,0, # 18 - 1f
+ 0,0,0,0,0,0,0,0, # 20 - 27
+ 0,3,3,3,3,3,0,0, # 28 - 2f
+ 0,0,0,0,0,0,0,0, # 30 - 37
+ 0,0,0,0,0,0,0,0, # 38 - 3f
+ 0,0,0,0,0,0,0,0, # 40 - 47
+ 0,0,0,0,0,0,0,0, # 48 - 4f
+ 0,0,0,0,0,0,0,0, # 50 - 57
+ 0,0,0,0,0,0,0,0, # 58 - 5f
+ 0,0,0,0,0,0,0,0, # 60 - 67
+ 0,0,0,0,0,0,0,0, # 68 - 6f
+ 0,0,0,0,0,0,0,0, # 70 - 77
+ 0,0,0,0,0,0,0,0, # 78 - 7f
+ 0,0,0,0,0,0,0,0, # 80 - 87
+ 0,0,0,0,0,0,0,0, # 88 - 8f
+ 0,0,0,0,0,0,0,0, # 90 - 97
+ 0,0,0,0,0,0,0,0, # 98 - 9f
+ 0,0,0,0,0,0,0,0, # a0 - a7
+ 0,0,0,0,0,0,0,0, # a8 - af
+ 0,0,0,0,0,0,0,0, # b0 - b7
+ 0,0,0,0,0,0,0,0, # b8 - bf
+ 0,0,0,0,0,0,0,0, # c0 - c7
+ 0,0,0,0,0,0,0,0, # c8 - cf
+ 0,0,0,0,0,0,0,0, # d0 - d7
+ 0,0,0,0,0,0,0,0, # d8 - df
+ 0,0,0,0,0,0,0,0, # e0 - e7
+ 0,0,0,0,0,0,0,0, # e8 - ef
+ 0,0,0,0,0,0,0,0, # f0 - f7
+ 0,0,0,0,0,0,4,5 # f8 - ff
+)
+
+UCS2LE_ST = (
+ 6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17
+ 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f
+ 7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27
+ 5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f
+ 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
+)
+
+UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
+
+UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS,
+ 'class_factor': 6,
+ 'state_table': UCS2LE_ST,
+ 'char_len_table': UCS2LE_CHAR_LEN_TABLE,
+ 'name': 'UTF-16LE'}
+
+# UTF-8
+
+UTF8_CLS = (
+ 1,1,1,1,1,1,1,1, # 00 - 07 # allow 0x00 as a legal value
+ 1,1,1,1,1,1,0,0, # 08 - 0f
+ 1,1,1,1,1,1,1,1, # 10 - 17
+ 1,1,1,0,1,1,1,1, # 18 - 1f
+ 1,1,1,1,1,1,1,1, # 20 - 27
+ 1,1,1,1,1,1,1,1, # 28 - 2f
+ 1,1,1,1,1,1,1,1, # 30 - 37
+ 1,1,1,1,1,1,1,1, # 38 - 3f
+ 1,1,1,1,1,1,1,1, # 40 - 47
+ 1,1,1,1,1,1,1,1, # 48 - 4f
+ 1,1,1,1,1,1,1,1, # 50 - 57
+ 1,1,1,1,1,1,1,1, # 58 - 5f
+ 1,1,1,1,1,1,1,1, # 60 - 67
+ 1,1,1,1,1,1,1,1, # 68 - 6f
+ 1,1,1,1,1,1,1,1, # 70 - 77
+ 1,1,1,1,1,1,1,1, # 78 - 7f
+ 2,2,2,2,3,3,3,3, # 80 - 87
+ 4,4,4,4,4,4,4,4, # 88 - 8f
+ 4,4,4,4,4,4,4,4, # 90 - 97
+ 4,4,4,4,4,4,4,4, # 98 - 9f
+ 5,5,5,5,5,5,5,5, # a0 - a7
+ 5,5,5,5,5,5,5,5, # a8 - af
+ 5,5,5,5,5,5,5,5, # b0 - b7
+ 5,5,5,5,5,5,5,5, # b8 - bf
+ 0,0,6,6,6,6,6,6, # c0 - c7
+ 6,6,6,6,6,6,6,6, # c8 - cf
+ 6,6,6,6,6,6,6,6, # d0 - d7
+ 6,6,6,6,6,6,6,6, # d8 - df
+ 7,8,8,8,8,8,8,8, # e0 - e7
+ 8,8,8,8,8,9,8,8, # e8 - ef
+ 10,11,11,11,11,11,11,11, # f0 - f7
+ 12,13,13,13,14,15,0,0 # f8 - ff
+)
+
+UTF8_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07
+ 9, 11, 8, 7, 6, 5, 4, 3,#08-0f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f
+ MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f
+ MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f
+ MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f
+ MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af
+ MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
+)
+
+UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
+
+UTF8_SM_MODEL = {'class_table': UTF8_CLS,
+ 'class_factor': 16,
+ 'state_table': UTF8_ST,
+ 'char_len_table': UTF8_CHAR_LEN_TABLE,
+ 'name': 'UTF-8'}
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/sbcharsetprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/sbcharsetprober.py
new file mode 100755
index 00000000..0adb51de
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/sbcharsetprober.py
@@ -0,0 +1,132 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import CharacterCategory, ProbingState, SequenceLikelihood
+
+
+class SingleByteCharSetProber(CharSetProber):
+ SAMPLE_SIZE = 64
+ SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2
+ POSITIVE_SHORTCUT_THRESHOLD = 0.95
+ NEGATIVE_SHORTCUT_THRESHOLD = 0.05
+
+ def __init__(self, model, reversed=False, name_prober=None):
+ super(SingleByteCharSetProber, self).__init__()
+ self._model = model
+ # TRUE if we need to reverse every pair in the model lookup
+ self._reversed = reversed
+ # Optional auxiliary prober for name decision
+ self._name_prober = name_prober
+ self._last_order = None
+ self._seq_counters = None
+ self._total_seqs = None
+ self._total_char = None
+ self._freq_char = None
+ self.reset()
+
+ def reset(self):
+ super(SingleByteCharSetProber, self).reset()
+ # char order of last character
+ self._last_order = 255
+ self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
+ self._total_seqs = 0
+ self._total_char = 0
+ # characters that fall in our sampling range
+ self._freq_char = 0
+
+ @property
+ def charset_name(self):
+ if self._name_prober:
+ return self._name_prober.charset_name
+ else:
+ return self._model['charset_name']
+
+ @property
+ def language(self):
+ if self._name_prober:
+ return self._name_prober.language
+ else:
+ return self._model.get('language')
+
+ def feed(self, byte_str):
+ if not self._model['keep_english_letter']:
+ byte_str = self.filter_international_words(byte_str)
+ if not byte_str:
+ return self.state
+ char_to_order_map = self._model['char_to_order_map']
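+ # Tally bigrams of frequently-used characters: each consecutive pair
+ # of orders below SAMPLE_SIZE is looked up in the model's precedence
+ # matrix, which classifies that pair by how likely it is in the
+ # target language.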
+ for i, c in enumerate(byte_str):
+ # XXX: Order is in range 1-64, so one would think we want 0-63 here,
+ # but that leads to 27 more test failures than before.
+ order = char_to_order_map[c]
+ # XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but
+ # CharacterCategory.SYMBOL is actually 253, so we use CONTROL
+ # to make it closer to the original intent. The only difference
+ # is whether or not we count digits and control characters for
+ # _total_char purposes.
+ if order < CharacterCategory.CONTROL:
+ self._total_char += 1
+ if order < self.SAMPLE_SIZE:
+ self._freq_char += 1
+ if self._last_order < self.SAMPLE_SIZE:
+ self._total_seqs += 1
+ if not self._reversed:
+ i = (self._last_order * self.SAMPLE_SIZE) + order
+ model = self._model['precedence_matrix'][i]
+ else: # reverse the order of the letters in the lookup
+ i = (order * self.SAMPLE_SIZE) + self._last_order
+ model = self._model['precedence_matrix'][i]
+ self._seq_counters[model] += 1
+ self._last_order = order
+
+ charset_name = self._model['charset_name']
+ if self.state == ProbingState.DETECTING:
+ if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
+ confidence = self.get_confidence()
+ if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
+ self.logger.debug('%s confidence = %s, we have a winner',
+ charset_name, confidence)
+ self._state = ProbingState.FOUND_IT
+ elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
+ self.logger.debug('%s confidence = %s, below negative '
+ 'shortcut threshold %s', charset_name,
+ confidence,
+ self.NEGATIVE_SHORTCUT_THRESHOLD)
+ self._state = ProbingState.NOT_ME
+
+ return self.state
+
+ def get_confidence(self):
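+ # Confidence is the share of observed bigrams rated POSITIVE,
+ # normalized by the ratio a typical text in this language exhibits,
+ # then scaled by the fraction of characters inside our sampling range.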
+ r = 0.01
+ if self._total_seqs > 0:
+ r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) /
+ self._total_seqs / self._model['typical_positive_ratio'])
+ r = r * self._freq_char / self._total_char
+ if r >= 1.0:
+ r = 0.99
+ return r
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/sbcsgroupprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/sbcsgroupprober.py
new file mode 100755
index 00000000..98e95dc1
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/sbcsgroupprober.py
@@ -0,0 +1,73 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetgroupprober import CharSetGroupProber
+from .sbcharsetprober import SingleByteCharSetProber
+from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
+ Latin5CyrillicModel, MacCyrillicModel,
+ Ibm866Model, Ibm855Model)
+from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
+from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
+# from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
+from .langthaimodel import TIS620ThaiModel
+from .langhebrewmodel import Win1255HebrewModel
+from .hebrewprober import HebrewProber
+from .langturkishmodel import Latin5TurkishModel
+
+
+class SBCSGroupProber(CharSetGroupProber):
+ def __init__(self):
+ super(SBCSGroupProber, self).__init__()
+ self.probers = [
+ SingleByteCharSetProber(Win1251CyrillicModel),
+ SingleByteCharSetProber(Koi8rModel),
+ SingleByteCharSetProber(Latin5CyrillicModel),
+ SingleByteCharSetProber(MacCyrillicModel),
+ SingleByteCharSetProber(Ibm866Model),
+ SingleByteCharSetProber(Ibm855Model),
+ SingleByteCharSetProber(Latin7GreekModel),
+ SingleByteCharSetProber(Win1253GreekModel),
+ SingleByteCharSetProber(Latin5BulgarianModel),
+ SingleByteCharSetProber(Win1251BulgarianModel),
+ # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
+ # after we retrain model.
+ # SingleByteCharSetProber(Latin2HungarianModel),
+ # SingleByteCharSetProber(Win1250HungarianModel),
+ SingleByteCharSetProber(TIS620ThaiModel),
+ SingleByteCharSetProber(Latin5TurkishModel),
+ ]
+ hebrew_prober = HebrewProber()
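+ # Hebrew needs special handling: the same windows-1255 model is used
+ # twice, as-is for logical (stored left-to-right) Hebrew and with
+ # reversed pair order for visual Hebrew; HebrewProber arbitrates
+ # between the two.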
+ logical_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel,
+ False, hebrew_prober)
+ visual_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, True,
+ hebrew_prober)
+ hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober)
+ self.probers.extend([hebrew_prober, logical_hebrew_prober,
+ visual_hebrew_prober])
+
+ self.reset()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/sjisprober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/sjisprober.py
new file mode 100755
index 00000000..9e29623b
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/sjisprober.py
@@ -0,0 +1,92 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import SJISDistributionAnalysis
+from .jpcntx import SJISContextAnalysis
+from .mbcssm import SJIS_SM_MODEL
+from .enums import ProbingState, MachineState
+
+
+class SJISProber(MultiByteCharSetProber):
+ def __init__(self):
+ super(SJISProber, self).__init__()
+ self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
+ self.distribution_analyzer = SJISDistributionAnalysis()
+ self.context_analyzer = SJISContextAnalysis()
+ self.reset()
+
+ def reset(self):
+ super(SJISProber, self).reset()
+ self.context_analyzer.reset()
+
+ @property
+ def charset_name(self):
+ return self.context_analyzer.charset_name
+
+ @property
+ def language(self):
+ return "Japanese"
+
+ def feed(self, byte_str):
+ for i in range(len(byte_str)):
+ coding_state = self.coding_sm.next_state(byte_str[i])
+ if coding_state == MachineState.ERROR:
+ self.logger.debug('%s %s prober hit error at byte %s',
+ self.charset_name, self.language, i)
+ self._state = ProbingState.NOT_ME
+ break
+ elif coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ elif coding_state == MachineState.START:
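+ # Returning to START means the state machine just consumed a
+ # complete character; feed its bytes to the context and
+ # distribution analyzers. _last_char buffers bytes across feed()
+ # calls so characters split between chunks are still counted.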
+ char_len = self.coding_sm.get_current_charlen()
+ if i == 0:
+ self._last_char[1] = byte_str[0]
+ self.context_analyzer.feed(self._last_char[2 - char_len:],
+ char_len)
+ self.distribution_analyzer.feed(self._last_char, char_len)
+ else:
+ self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3
+ - char_len], char_len)
+ self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+ char_len)
+
+ self._last_char[0] = byte_str[-1]
+
+ if self.state == ProbingState.DETECTING:
+ if (self.context_analyzer.got_enough_data() and
+ (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self):
+ context_conf = self.context_analyzer.get_confidence()
+ distrib_conf = self.distribution_analyzer.get_confidence()
+ return max(context_conf, distrib_conf)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/universaldetector.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/universaldetector.py
new file mode 100755
index 00000000..7b4e92d6
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/universaldetector.py
@@ -0,0 +1,286 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+"""
+Module containing the UniversalDetector detector class, which is the primary
+class a user of ``chardet`` should use.
+
+:author: Mark Pilgrim (initial port to Python)
+:author: Shy Shalom (original C code)
+:author: Dan Blanchard (major refactoring for 3.0)
+:author: Ian Cordasco
+"""
+
+
+import codecs
+import logging
+import re
+
+from .charsetgroupprober import CharSetGroupProber
+from .enums import InputState, LanguageFilter, ProbingState
+from .escprober import EscCharSetProber
+from .latin1prober import Latin1Prober
+from .mbcsgroupprober import MBCSGroupProber
+from .sbcsgroupprober import SBCSGroupProber
+
+
+class UniversalDetector(object):
+ """
+ The ``UniversalDetector`` class underlies the ``chardet.detect`` function
+ and coordinates all of the different charset probers.
+
+ To get a ``dict`` containing an encoding and its confidence, you can simply
+ run:
+
+ .. code::
+
+ u = UniversalDetector()
+ u.feed(some_bytes)
+ u.close()
+ detected = u.result
+
+ """
+
+ MINIMUM_THRESHOLD = 0.20
+ HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
+ ESC_DETECTOR = re.compile(b'(\033|~{)')
+ WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]')
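+ # close() upgrades an ISO-8859-* guess to the matching Windows
+ # codepage when bytes in 0x80-0x9F were seen, since the Windows
+ # codepages define printable characters where ISO-8859 has controls.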
+ ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252',
+ 'iso-8859-2': 'Windows-1250',
+ 'iso-8859-5': 'Windows-1251',
+ 'iso-8859-6': 'Windows-1256',
+ 'iso-8859-7': 'Windows-1253',
+ 'iso-8859-8': 'Windows-1255',
+ 'iso-8859-9': 'Windows-1254',
+ 'iso-8859-13': 'Windows-1257'}
+
+ def __init__(self, lang_filter=LanguageFilter.ALL):
+ self._esc_charset_prober = None
+ self._charset_probers = []
+ self.result = None
+ self.done = None
+ self._got_data = None
+ self._input_state = None
+ self._last_char = None
+ self.lang_filter = lang_filter
+ self.logger = logging.getLogger(__name__)
+ self._has_win_bytes = None
+ self.reset()
+
+ def reset(self):
+ """
+ Reset the UniversalDetector and all of its probers back to their
+ initial states. This is called by ``__init__``, so you only need to
+ call this directly in between analyses of different documents.
+ """
+ self.result = {'encoding': None, 'confidence': 0.0, 'language': None}
+ self.done = False
+ self._got_data = False
+ self._has_win_bytes = False
+ self._input_state = InputState.PURE_ASCII
+ self._last_char = b''
+ if self._esc_charset_prober:
+ self._esc_charset_prober.reset()
+ for prober in self._charset_probers:
+ prober.reset()
+
+ def feed(self, byte_str):
+ """
+ Takes a chunk of a document and feeds it through all of the relevant
+ charset probers.
+
+ After calling ``feed``, you can check the value of the ``done``
+ attribute to see if you need to continue feeding the
+ ``UniversalDetector`` more data, or if it has made a prediction
+ (in the ``result`` attribute).
+
+ .. note::
+ You should always call ``close`` when you're done feeding in your
+ document if ``done`` is not already ``True``.
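+
+ For example, to feed a document incrementally (a usage sketch; the
+ file name is a placeholder):
+
+ .. code::
+
+ detector = UniversalDetector()
+ with open('some_file.txt', 'rb') as f:
+ for line in f:
+ detector.feed(line)
+ if detector.done:
+ break
+ detector.close()
+ detected = detector.result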
+ """
+ if self.done:
+ return
+
+ if not len(byte_str):
+ return
+
+ if not isinstance(byte_str, bytearray):
+ byte_str = bytearray(byte_str)
+
+ # First check for known BOMs, since these are guaranteed to be correct
+ if not self._got_data:
+ # If the data starts with BOM, we know it is UTF
+ if byte_str.startswith(codecs.BOM_UTF8):
+ # EF BB BF UTF-8 with BOM
+ self.result = {'encoding': "UTF-8-SIG",
+ 'confidence': 1.0,
+ 'language': ''}
+ elif byte_str.startswith((codecs.BOM_UTF32_LE,
+ codecs.BOM_UTF32_BE)):
+ # FF FE 00 00 UTF-32, little-endian BOM
+ # 00 00 FE FF UTF-32, big-endian BOM
+ self.result = {'encoding': "UTF-32",
+ 'confidence': 1.0,
+ 'language': ''}
+ elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
+ # FE FF 00 00 UCS-4, unusual octet order BOM (3412)
+ self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
+ 'confidence': 1.0,
+ 'language': ''}
+ elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
+ # 00 00 FF FE UCS-4, unusual octet order BOM (2143)
+ self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
+ 'confidence': 1.0,
+ 'language': ''}
+ elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
+ # FF FE UTF-16, little endian BOM
+ # FE FF UTF-16, big endian BOM
+ self.result = {'encoding': "UTF-16",
+ 'confidence': 1.0,
+ 'language': ''}
+
+ self._got_data = True
+ if self.result['encoding'] is not None:
+ self.done = True
+ return
+
+ # If none of those matched and we've only seen ASCII so far, check
+ # for high bytes and escape sequences
+ if self._input_state == InputState.PURE_ASCII:
+ if self.HIGH_BYTE_DETECTOR.search(byte_str):
+ self._input_state = InputState.HIGH_BYTE
+ elif self._input_state == InputState.PURE_ASCII and \
+ self.ESC_DETECTOR.search(self._last_char + byte_str):
+ self._input_state = InputState.ESC_ASCII
+
+ self._last_char = byte_str[-1:]
+
+ # If we've seen escape sequences, use the EscCharSetProber, which
+ # uses a simple state machine to check for known escape sequences in
+ # HZ and ISO-2022 encodings, since those are the only encodings that
+ # use such sequences.
+ if self._input_state == InputState.ESC_ASCII:
+ if not self._esc_charset_prober:
+ self._esc_charset_prober = EscCharSetProber(self.lang_filter)
+ if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
+ self.result = {'encoding':
+ self._esc_charset_prober.charset_name,
+ 'confidence':
+ self._esc_charset_prober.get_confidence(),
+ 'language':
+ self._esc_charset_prober.language}
+ self.done = True
+ # If we've seen high bytes (i.e., those with values greater than 127),
+ # we need to do more complicated checks using all our multi-byte and
+ # single-byte probers that are left. The single-byte probers
+ # use character bigram distributions to determine the encoding, whereas
+ # the multi-byte probers use a combination of character unigram and
+ # bigram distributions.
+ elif self._input_state == InputState.HIGH_BYTE:
+ if not self._charset_probers:
+ self._charset_probers = [MBCSGroupProber(self.lang_filter)]
+ # If we're checking non-CJK encodings, use single-byte prober
+ if self.lang_filter & LanguageFilter.NON_CJK:
+ self._charset_probers.append(SBCSGroupProber())
+ self._charset_probers.append(Latin1Prober())
+ for prober in self._charset_probers:
+ if prober.feed(byte_str) == ProbingState.FOUND_IT:
+ self.result = {'encoding': prober.charset_name,
+ 'confidence': prober.get_confidence(),
+ 'language': prober.language}
+ self.done = True
+ break
+ if self.WIN_BYTE_DETECTOR.search(byte_str):
+ self._has_win_bytes = True
+
+ def close(self):
+ """
+ Stop analyzing the current document and come up with a final
+ prediction.
+
+ :returns: The ``result`` attribute, a ``dict`` with the keys
+ `encoding`, `confidence`, and `language`.
+ """
+ # Don't bother with checks if we're already done
+ if self.done:
+ return self.result
+ self.done = True
+
+ if not self._got_data:
+ self.logger.debug('no data received!')
+
+ # Default to ASCII if it is all we've seen so far
+ elif self._input_state == InputState.PURE_ASCII:
+ self.result = {'encoding': 'ascii',
+ 'confidence': 1.0,
+ 'language': ''}
+
+ # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
+ elif self._input_state == InputState.HIGH_BYTE:
+ prober_confidence = None
+ max_prober_confidence = 0.0
+ max_prober = None
+ for prober in self._charset_probers:
+ if not prober:
+ continue
+ prober_confidence = prober.get_confidence()
+ if prober_confidence > max_prober_confidence:
+ max_prober_confidence = prober_confidence
+ max_prober = prober
+ if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
+ charset_name = max_prober.charset_name
+ lower_charset_name = max_prober.charset_name.lower()
+ confidence = max_prober.get_confidence()
+ # Use Windows encoding name instead of ISO-8859 if we saw any
+ # extra Windows-specific bytes
+ if lower_charset_name.startswith('iso-8859'):
+ if self._has_win_bytes:
+ charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
+ charset_name)
+ self.result = {'encoding': charset_name,
+ 'confidence': confidence,
+ 'language': max_prober.language}
+
+ # Log all prober confidences if none met MINIMUM_THRESHOLD
+ if self.logger.getEffectiveLevel() == logging.DEBUG:
+ if self.result['encoding'] is None:
+ self.logger.debug('no probers hit minimum threshold')
+ for group_prober in self._charset_probers:
+ if not group_prober:
+ continue
+ if isinstance(group_prober, CharSetGroupProber):
+ for prober in group_prober.probers:
+ self.logger.debug('%s %s confidence = %s',
+ prober.charset_name,
+ prober.language,
+ prober.get_confidence())
+ else:
+ self.logger.debug('%s %s confidence = %s',
+ group_prober.charset_name,
+ group_prober.language,
+ group_prober.get_confidence())
+ return self.result
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/utf8prober.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/utf8prober.py
new file mode 100755
index 00000000..6c3196cc
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/utf8prober.py
@@ -0,0 +1,82 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState, MachineState
+from .codingstatemachine import CodingStateMachine
+from .mbcssm import UTF8_SM_MODEL
+
+
+class UTF8Prober(CharSetProber):
+ ONE_CHAR_PROB = 0.5
+
+ def __init__(self):
+ super(UTF8Prober, self).__init__()
+ self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
+ self._num_mb_chars = None
+ self.reset()
+
+ def reset(self):
+ super(UTF8Prober, self).reset()
+ self.coding_sm.reset()
+ self._num_mb_chars = 0
+
+ @property
+ def charset_name(self):
+ return "utf-8"
+
+ @property
+ def language(self):
+ return ""
+
+ def feed(self, byte_str):
+ for c in byte_str:
+ coding_state = self.coding_sm.next_state(c)
+ if coding_state == MachineState.ERROR:
+ self._state = ProbingState.NOT_ME
+ break
+ elif coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ elif coding_state == MachineState.START:
+ if self.coding_sm.get_current_charlen() >= 2:
+ self._num_mb_chars += 1
+
+ if self.state == ProbingState.DETECTING:
+ if self.get_confidence() > self.SHORTCUT_THRESHOLD:
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self):
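+ # Doubt starts at 0.99 and is halved (ONE_CHAR_PROB) for every
+ # multi-byte character seen; confidence is one minus the remaining
+ # doubt, capped at 0.99 once six such characters have appeared.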
+ unlike = 0.99
+ if self._num_mb_chars < 6:
+ unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars
+ return 1.0 - unlike
+ else:
+ return unlike
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/version.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/version.py
new file mode 100755
index 00000000..bb2a34a7
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/chardet/version.py
@@ -0,0 +1,9 @@
+"""
+This module exists only to simplify retrieving the version number of chardet
+from within setup.py and from chardet subpackages.
+
+:author: Dan Blanchard (dan.blanchard@gmail.com)
+"""
+
+__version__ = "3.0.4"
+VERSION = __version__.split('.')
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/LICENSE b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/LICENSE
deleted file mode 100644
index ad82355b..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2019 TAHRI Ahmed R.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/METADATA
deleted file mode 100644
index 1b04ed4c..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/METADATA
+++ /dev/null
@@ -1,269 +0,0 @@
-Metadata-Version: 2.1
-Name: charset-normalizer
-Version: 2.0.12
-Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet.
-Home-page: https://github.com/ousret/charset_normalizer
-Author: Ahmed TAHRI @Ousret
-Author-email: ahmed.tahri@cloudnursery.dev
-License: MIT
-Project-URL: Bug Reports, https://github.com/Ousret/charset_normalizer/issues
-Project-URL: Documentation, https://charset-normalizer.readthedocs.io/en/latest
-Keywords: encoding,i18n,txt,text,charset,charset-detector,normalization,unicode,chardet
-Platform: UNKNOWN
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Intended Audience :: Developers
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Topic :: Text Processing :: Linguistic
-Classifier: Topic :: Utilities
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Classifier: Typing :: Typed
-Requires-Python: >=3.5.0
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Provides-Extra: unicode_backport
-Requires-Dist: unicodedata2 ; extra == 'unicode_backport'
-
-
-Charset Detection, for Everyone 👋
-
-
- The Real First Universal Charset Detector
-
-
-
-
-
-
-
-
-
-
-
-> A library that helps you read text from an unknown charset encoding. Motivated by `chardet`,
-> I'm trying to resolve the issue by taking a new approach.
-> All IANA character set names for which the Python core library provides codecs are supported.
-
-
- >>>>> 👉 Try Me Online Now, Then Adopt Me 👈 <<<<<
-
-
-This project offers you an alternative to **Universal Charset Encoding Detector**, also known as **Chardet**.
-
-| Feature | [Chardet](https://github.com/chardet/chardet) | Charset Normalizer | [cChardet](https://github.com/PyYoshi/cChardet) |
-| ------------- | :-------------: | :------------------: | :------------------: |
-| `Fast` | ❌ | ✅ | ✅ |
-| `Universal**` | ❌ | ✅ | ❌ |
-| `Reliable` **without** distinguishable standards | ❌ | ✅ | ✅ |
-| `Reliable` **with** distinguishable standards | ✅ | ✅ | ✅ |
-| `Free & Open` | ✅ | ✅ | ✅ |
-| `License` | LGPL-2.1 | MIT | MPL-1.1
-| `Native Python` | ✅ | ✅ | ❌ |
-| `Detect spoken language` | ❌ | ✅ | N/A |
-| `Supported Encoding` | 30 | :tada: [93](https://charset-normalizer.readthedocs.io/en/latest/user/support.html#supported-encodings) | 40
-
-
-
-
-*\*\* : They are clearly using specific code for a specific encoding even if covering most of used one*
-Did you got there because of the logs? See [https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html](https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html)
-
-## ⭐ Your support
-
-*Fork, test-it, star-it, submit your ideas! We do listen.*
-
-## ⚡ Performance
-
-This package offer better performance than its counterpart Chardet. Here are some numbers.
-
-| Package | Accuracy | Mean per file (ms) | File per sec (est) |
-| ------------- | :-------------: | :------------------: | :------------------: |
-| [chardet](https://github.com/chardet/chardet) | 92 % | 220 ms | 5 file/sec |
-| charset-normalizer | **98 %** | **40 ms** | 25 file/sec |
-
-| Package | 99th percentile | 95th percentile | 50th percentile |
-| ------------- | :-------------: | :------------------: | :------------------: |
-| [chardet](https://github.com/chardet/chardet) | 1115 ms | 300 ms | 27 ms |
-| charset-normalizer | 460 ms | 240 ms | 18 ms |
-
-Chardet's performance on larger file (1MB+) are very poor. Expect huge difference on large payload.
-
-> Stats are generated using 400+ files using default parameters. More details on used files, see GHA workflows.
-> And yes, these results might change at any time. The dataset can be updated to include more files.
-> The actual delays heavily depends on your CPU capabilities. The factors should remain the same.
-
-[cchardet](https://github.com/PyYoshi/cChardet) is a non-native (cpp binding) and unmaintained faster alternative with
-a better accuracy than chardet but lower than this package. If speed is the most important factor, you should try it.
-
-## ✨ Installation
-
-Using PyPi for latest stable
-```sh
-pip install charset-normalizer -U
-```
-
-If you want a more up-to-date `unicodedata` than the one available in your Python setup.
-```sh
-pip install charset-normalizer[unicode_backport] -U
-```
-
-## 🚀 Basic Usage
-
-### CLI
-This package comes with a CLI.
-
-```
-usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD]
- file [file ...]
-
-The Real First Universal Charset Detector. Discover originating encoding used
-on text file. Normalize text to unicode.
-
-positional arguments:
- files File(s) to be analysed
-
-optional arguments:
- -h, --help show this help message and exit
- -v, --verbose Display complementary information about file if any.
- Stdout will contain logs about the detection process.
- -a, --with-alternative
- Output complementary possibilities if any. Top-level
- JSON WILL be a list.
- -n, --normalize Permit to normalize input file. If not set, program
- does not write anything.
- -m, --minimal Only output the charset detected to STDOUT. Disabling
- JSON output.
- -r, --replace Replace file when trying to normalize it instead of
- creating a new one.
- -f, --force Replace file without asking if you are sure, use this
- flag with caution.
- -t THRESHOLD, --threshold THRESHOLD
- Define a custom maximum amount of chaos allowed in
- decoded content. 0. <= chaos <= 1.
- --version Show version information and exit.
-```
-
-```bash
-normalizer ./data/sample.1.fr.srt
-```
-
-:tada: Since version 1.4.0 the CLI produce easily usable stdout result in JSON format.
-
-```json
-{
- "path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
- "encoding": "cp1252",
- "encoding_aliases": [
- "1252",
- "windows_1252"
- ],
- "alternative_encodings": [
- "cp1254",
- "cp1256",
- "cp1258",
- "iso8859_14",
- "iso8859_15",
- "iso8859_16",
- "iso8859_3",
- "iso8859_9",
- "latin_1",
- "mbcs"
- ],
- "language": "French",
- "alphabets": [
- "Basic Latin",
- "Latin-1 Supplement"
- ],
- "has_sig_or_bom": false,
- "chaos": 0.149,
- "coherence": 97.152,
- "unicode_path": null,
- "is_preferred": true
-}
-```
-
-### Python
-*Just print out normalized text*
-```python
-from charset_normalizer import from_path
-
-results = from_path('./my_subtitle.srt')
-
-print(str(results.best()))
-```
-
-*Normalize any text file*
-```python
-from charset_normalizer import normalize
-try:
- normalize('./my_subtitle.srt') # should write to disk my_subtitle-***.srt
-except IOError as e:
- print('Sadly, we are unable to perform charset normalization.', str(e))
-```
-
-*Upgrade your code without effort*
-```python
-from charset_normalizer import detect
-```
-
-The above code will behave the same as **chardet**. We ensure that we offer the best (reasonable) BC result possible.
-
-See the docs for advanced usage : [readthedocs.io](https://charset-normalizer.readthedocs.io/en/latest/)
-
-## 😇 Why
-
-When I started using Chardet, I noticed that it was not suited to my expectations, and I wanted to propose a
-reliable alternative using a completely different method. Also! I never back down on a good challenge!
-
-I **don't care** about the **originating charset** encoding, because **two different tables** can
-produce **two identical rendered string.**
-What I want is to get readable text, the best I can.
-
-In a way, **I'm brute forcing text decoding.** How cool is that ? 😎
-
-Don't confuse package **ftfy** with charset-normalizer or chardet. ftfy goal is to repair unicode string whereas charset-normalizer to convert raw file in unknown encoding to unicode.
-
-## 🍰 How
-
- - Discard all charset encoding table that could not fit the binary content.
- - Measure chaos, or the mess once opened (by chunks) with a corresponding charset encoding.
- - Extract matches with the lowest mess detected.
- - Additionally, we measure coherence / probe for a language.
-
-**Wait a minute**, what is chaos/mess and coherence according to **YOU ?**
-
-*Chaos :* I opened hundred of text files, **written by humans**, with the wrong encoding table. **I observed**, then
-**I established** some ground rules about **what is obvious** when **it seems like** a mess.
- I know that my interpretation of what is chaotic is very subjective, feel free to contribute in order to
- improve or rewrite it.
-
-*Coherence :* For each language there is on earth, we have computed ranked letter appearance occurrences (the best we can). So I thought
-that intel is worth something here. So I use those records against decoded text to check if I can detect intelligent design.
-
-## ⚡ Known limitations
-
- - Language detection is unreliable when text contains two or more languages sharing identical letters. (eg. HTML (english tags) + Turkish content (Sharing Latin characters))
- - Every charset detector heavily depends on sufficient content. In common cases, do not bother run detection on very tiny content.
-
-## 👤 Contributing
-
-Contributions, issues and feature requests are very much welcome.
-Feel free to check [issues page](https://github.com/ousret/charset_normalizer/issues) if you want to contribute.
-
-## 📝 License
-
-Copyright © 2019 [Ahmed TAHRI @Ousret](https://github.com/Ousret).
-This project is [MIT](https://github.com/Ousret/charset_normalizer/blob/master/LICENSE) licensed.
-
-Characters frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/)
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/RECORD
deleted file mode 100644
index d5a8e21c..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/RECORD
+++ /dev/null
@@ -1,21 +0,0 @@
-../../bin/normalizer,sha256=_6VNRnw7MyVCL-CINQ044FaHcpLHwQVgd-_D0cWq4KI,244
-charset_normalizer-2.0.12.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-charset_normalizer-2.0.12.dist-info/LICENSE,sha256=6zGgxaT7Cbik4yBV0lweX5w1iidS_vPNcgIT0cz-4kE,1070
-charset_normalizer-2.0.12.dist-info/METADATA,sha256=eX-U3s7nb6wcvXZFyM1mdBf1yz4I0msVBgNvLEscAbo,11713
-charset_normalizer-2.0.12.dist-info/RECORD,,
-charset_normalizer-2.0.12.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-charset_normalizer-2.0.12.dist-info/entry_points.txt,sha256=5AJq_EPtGGUwJPgQLnBZfbVr-FYCIwT0xP7dIEZO3NI,77
-charset_normalizer-2.0.12.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19
-charset_normalizer/__init__.py,sha256=x2A2OW29MBcqdxsvy6t1wzkUlH3ma0guxL6ZCfS8J94,1790
-charset_normalizer/api.py,sha256=r__Wz85F5pYOkRwEY5imXY_pCZ2Nil1DkdaAJY7T5o0,20303
-charset_normalizer/assets/__init__.py,sha256=FPnfk8limZRb8ZIUQcTvPEcbuM1eqOdWGw0vbWGycDs,25485
-charset_normalizer/cd.py,sha256=a9Kzzd9tHl_W08ExbCFMmRJqdo2k7EBQ8Z_3y9DmYsg,11076
-charset_normalizer/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-charset_normalizer/cli/normalizer.py,sha256=LkeFIRc1l28rOgXpEby695x0bcKQv4D8z9FmA3Z2c3A,9364
-charset_normalizer/constant.py,sha256=51u_RS10I1vYVpBao__xHqf--HHNrR6me1A1se5r5Y0,19449
-charset_normalizer/legacy.py,sha256=XKeZOts_HdYQU_Jb3C9ZfOjY2CiUL132k9_nXer8gig,3384
-charset_normalizer/md.py,sha256=WEwnu2MyIiMeEaorRduqcTxGjIBclWIG3i-9_UL6LLs,18191
-charset_normalizer/models.py,sha256=XrGpVxfonhcilIWC1WeiP3-ZORGEe_RG3sgrfPLl9qM,13303
-charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-charset_normalizer/utils.py,sha256=AWSL0z1B42IwdLfjX4ZMASA9cTUsTp0PweCdW98SI-4,9308
-charset_normalizer/version.py,sha256=uxO2cT0YIavQv4dQlNGmHPIOOwOa-exspxXi3IR7dck,80
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/WHEEL
deleted file mode 100644
index becc9a66..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.37.1)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/entry_points.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/entry_points.txt
deleted file mode 100644
index a67f60bc..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/entry_points.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-[console_scripts]
-normalizer = charset_normalizer.cli.normalizer:cli_detect
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/top_level.txt
deleted file mode 100644
index 66958f0a..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer-2.0.12.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-charset_normalizer
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/__init__.py
deleted file mode 100644
index 1aea851a..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/__init__.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf_8 -*-
-"""
-Charset-Normalizer
-~~~~~~~~~~~~~~
-The Real First Universal Charset Detector.
-A library that helps you read text from an unknown charset encoding.
-Motivated by chardet, This package is trying to resolve the issue by taking a new approach.
-All IANA character set names for which the Python core library provides codecs are supported.
-
-Basic usage:
- >>> from charset_normalizer import from_bytes
- >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
- >>> best_guess = results.best()
- >>> str(best_guess)
- 'Bсеки човек има право на образование. Oбразованието!'
-
-Others methods and usages are available - see the full documentation
-at .
-:copyright: (c) 2021 by Ahmed TAHRI
-:license: MIT, see LICENSE for more details.
-"""
-import logging
-
-from .api import from_bytes, from_fp, from_path, normalize
-from .legacy import (
- CharsetDetector,
- CharsetDoctor,
- CharsetNormalizerMatch,
- CharsetNormalizerMatches,
- detect,
-)
-from .models import CharsetMatch, CharsetMatches
-from .utils import set_logging_handler
-from .version import VERSION, __version__
-
-__all__ = (
- "from_fp",
- "from_path",
- "from_bytes",
- "normalize",
- "detect",
- "CharsetMatch",
- "CharsetMatches",
- "CharsetNormalizerMatch",
- "CharsetNormalizerMatches",
- "CharsetDetector",
- "CharsetDoctor",
- "__version__",
- "VERSION",
- "set_logging_handler",
-)
-
-# Attach a NullHandler to the top level logger by default
-# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
-
-logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/api.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/api.py
deleted file mode 100644
index bdc8ed98..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/api.py
+++ /dev/null
@@ -1,608 +0,0 @@
-import logging
-from os.path import basename, splitext
-from typing import BinaryIO, List, Optional, Set
-
-try:
- from os import PathLike
-except ImportError: # pragma: no cover
- PathLike = str # type: ignore
-
-from .cd import (
- coherence_ratio,
- encoding_languages,
- mb_encoding_languages,
- merge_coherence_ratios,
-)
-from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
-from .md import mess_ratio
-from .models import CharsetMatch, CharsetMatches
-from .utils import (
- any_specified_encoding,
- iana_name,
- identify_sig_or_bom,
- is_cp_similar,
- is_multi_byte_encoding,
- should_strip_sig_or_bom,
-)
-
-# Will most likely be controversial
-# logging.addLevelName(TRACE, "TRACE")
-logger = logging.getLogger("charset_normalizer")
-explain_handler = logging.StreamHandler()
-explain_handler.setFormatter(
- logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
-)
-
-
-def from_bytes(
- sequences: bytes,
- steps: int = 5,
- chunk_size: int = 512,
- threshold: float = 0.2,
- cp_isolation: List[str] = None,
- cp_exclusion: List[str] = None,
- preemptive_behaviour: bool = True,
- explain: bool = False,
-) -> CharsetMatches:
- """
-    Given a raw bytes sequence, return the best possible charsets usable to render str objects.
-    If there are no results, it is a strong indicator that the source is binary/not text.
-    By default, the process will extract 5 blocks of 512 bytes each to assess the mess and coherence of a given sequence,
-    and will give up on a particular code page after 20% of measured mess. Those criteria are customizable at will.
-
-    The preemptive behavior DOES NOT replace the traditional detection workflow; it prioritizes a particular code page
-    but never takes it for granted. It can improve performance.
-
-    You may want to focus your attention on some code pages and/or exclude others; use cp_isolation and cp_exclusion
-    for that purpose.
-
-    This function will strip the SIG from the payload/sequence every time, except for UTF-16 and UTF-32.
-    By default the library does not set up any handler other than the NullHandler; if you set the 'explain' toggle
-    to True, it will alter the logger configuration by adding a StreamHandler suitable for debugging.
-    A custom logging format and handler can be set manually.
- """
-
- if not isinstance(sequences, (bytearray, bytes)):
- raise TypeError(
- "Expected object of type bytes or bytearray, got: {0}".format(
- type(sequences)
- )
- )
-
- if explain:
- previous_logger_level = logger.level # type: int
- logger.addHandler(explain_handler)
- logger.setLevel(TRACE)
-
- length = len(sequences) # type: int
-
- if length == 0:
- logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
- if explain:
- logger.removeHandler(explain_handler)
- logger.setLevel(previous_logger_level or logging.WARNING)
- return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
-
- if cp_isolation is not None:
- logger.log(
- TRACE,
-            "cp_isolation is set. Use this flag for debugging purposes. "
-            "Limited list of encodings allowed: %s.",
- ", ".join(cp_isolation),
- )
- cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
- else:
- cp_isolation = []
-
- if cp_exclusion is not None:
- logger.log(
- TRACE,
-            "cp_exclusion is set. Use this flag for debugging purposes. "
-            "Limited list of encodings excluded: %s.",
- ", ".join(cp_exclusion),
- )
- cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
- else:
- cp_exclusion = []
-
- if length <= (chunk_size * steps):
- logger.log(
- TRACE,
-            "overriding steps (%i) and chunk_size (%i) as the content (%i byte(s) given) does not fit the parameters.",
- steps,
- chunk_size,
- length,
- )
- steps = 1
- chunk_size = length
-
- if steps > 1 and length / steps < chunk_size:
- chunk_size = int(length / steps)
-
- is_too_small_sequence = len(sequences) < TOO_SMALL_SEQUENCE # type: bool
- is_too_large_sequence = len(sequences) >= TOO_BIG_SEQUENCE # type: bool
-
- if is_too_small_sequence:
- logger.log(
- TRACE,
- "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
- length
- ),
- )
- elif is_too_large_sequence:
- logger.log(
- TRACE,
- "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
- length
- ),
- )
-
- prioritized_encodings = [] # type: List[str]
-
- specified_encoding = (
- any_specified_encoding(sequences) if preemptive_behaviour else None
- ) # type: Optional[str]
-
- if specified_encoding is not None:
- prioritized_encodings.append(specified_encoding)
- logger.log(
- TRACE,
- "Detected declarative mark in sequence. Priority +1 given for %s.",
- specified_encoding,
- )
-
- tested = set() # type: Set[str]
- tested_but_hard_failure = [] # type: List[str]
- tested_but_soft_failure = [] # type: List[str]
-
- fallback_ascii = None # type: Optional[CharsetMatch]
- fallback_u8 = None # type: Optional[CharsetMatch]
- fallback_specified = None # type: Optional[CharsetMatch]
-
- results = CharsetMatches() # type: CharsetMatches
-
- sig_encoding, sig_payload = identify_sig_or_bom(sequences)
-
- if sig_encoding is not None:
- prioritized_encodings.append(sig_encoding)
- logger.log(
- TRACE,
- "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
- len(sig_payload),
- sig_encoding,
- )
-
- prioritized_encodings.append("ascii")
-
- if "utf_8" not in prioritized_encodings:
- prioritized_encodings.append("utf_8")
-
- for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
-
- if cp_isolation and encoding_iana not in cp_isolation:
- continue
-
- if cp_exclusion and encoding_iana in cp_exclusion:
- continue
-
- if encoding_iana in tested:
- continue
-
- tested.add(encoding_iana)
-
- decoded_payload = None # type: Optional[str]
- bom_or_sig_available = sig_encoding == encoding_iana # type: bool
- strip_sig_or_bom = bom_or_sig_available and should_strip_sig_or_bom(
- encoding_iana
- ) # type: bool
-
- if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
- logger.log(
- TRACE,
-                "Encoding %s won't be tested as-is because it requires a BOM. Will try some LE/BE sub-encoders.",
- encoding_iana,
- )
- continue
-
- try:
- is_multi_byte_decoder = is_multi_byte_encoding(encoding_iana) # type: bool
- except (ModuleNotFoundError, ImportError):
- logger.log(
- TRACE,
- "Encoding %s does not provide an IncrementalDecoder",
- encoding_iana,
- )
- continue
-
- try:
- if is_too_large_sequence and is_multi_byte_decoder is False:
- str(
- sequences[: int(50e4)]
- if strip_sig_or_bom is False
- else sequences[len(sig_payload) : int(50e4)],
- encoding=encoding_iana,
- )
- else:
- decoded_payload = str(
- sequences
- if strip_sig_or_bom is False
- else sequences[len(sig_payload) :],
- encoding=encoding_iana,
- )
- except (UnicodeDecodeError, LookupError) as e:
- if not isinstance(e, LookupError):
- logger.log(
- TRACE,
- "Code page %s does not fit given bytes sequence at ALL. %s",
- encoding_iana,
- str(e),
- )
- tested_but_hard_failure.append(encoding_iana)
- continue
-
- similar_soft_failure_test = False # type: bool
-
- for encoding_soft_failed in tested_but_soft_failure:
- if is_cp_similar(encoding_iana, encoding_soft_failed):
- similar_soft_failure_test = True
- break
-
- if similar_soft_failure_test:
- logger.log(
- TRACE,
-                "%s is deemed too similar to code page %s, which was already considered unsuited. Continuing!",
- encoding_iana,
- encoding_soft_failed,
- )
- continue
-
- r_ = range(
- 0 if not bom_or_sig_available else len(sig_payload),
- length,
- int(length / steps),
- )
-
- multi_byte_bonus = (
- is_multi_byte_decoder
- and decoded_payload is not None
- and len(decoded_payload) < length
- ) # type: bool
-
- if multi_byte_bonus:
- logger.log(
- TRACE,
-                "Code page %s is a multi-byte encoding table and it appears that at least one character "
-                "was encoded using n-bytes.",
- encoding_iana,
- )
-
- max_chunk_gave_up = int(len(r_) / 4) # type: int
-
- max_chunk_gave_up = max(max_chunk_gave_up, 2)
- early_stop_count = 0 # type: int
- lazy_str_hard_failure = False
-
- md_chunks = [] # type: List[str]
- md_ratios = []
-
- for i in r_:
- if i + chunk_size > length + 8:
- continue
-
- cut_sequence = sequences[i : i + chunk_size]
-
- if bom_or_sig_available and strip_sig_or_bom is False:
- cut_sequence = sig_payload + cut_sequence
-
- try:
- chunk = cut_sequence.decode(
- encoding_iana,
- errors="ignore" if is_multi_byte_decoder else "strict",
- ) # type: str
- except UnicodeDecodeError as e: # Lazy str loading may have missed something there
- logger.log(
- TRACE,
- "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
- encoding_iana,
- str(e),
- )
- early_stop_count = max_chunk_gave_up
- lazy_str_hard_failure = True
- break
-
- # multi-byte bad cutting detector and adjustment
- # not the cleanest way to perform that fix but clever enough for now.
- if is_multi_byte_decoder and i > 0 and sequences[i] >= 0x80:
-
- chunk_partial_size_chk = min(chunk_size, 16) # type: int
-
- if (
- decoded_payload
- and chunk[:chunk_partial_size_chk] not in decoded_payload
- ):
- for j in range(i, i - 4, -1):
- cut_sequence = sequences[j : i + chunk_size]
-
- if bom_or_sig_available and strip_sig_or_bom is False:
- cut_sequence = sig_payload + cut_sequence
-
- chunk = cut_sequence.decode(encoding_iana, errors="ignore")
-
- if chunk[:chunk_partial_size_chk] in decoded_payload:
- break
-
- md_chunks.append(chunk)
-
- md_ratios.append(mess_ratio(chunk, threshold))
-
- if md_ratios[-1] >= threshold:
- early_stop_count += 1
-
- if (early_stop_count >= max_chunk_gave_up) or (
- bom_or_sig_available and strip_sig_or_bom is False
- ):
- break
-
-        # We might want to check the sequence again with the whole content
-        # but only if the initial MD tests pass
- if (
- not lazy_str_hard_failure
- and is_too_large_sequence
- and not is_multi_byte_decoder
- ):
- try:
- sequences[int(50e3) :].decode(encoding_iana, errors="strict")
- except UnicodeDecodeError as e:
- logger.log(
- TRACE,
- "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
- encoding_iana,
- str(e),
- )
- tested_but_hard_failure.append(encoding_iana)
- continue
-
- mean_mess_ratio = (
- sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
- ) # type: float
- if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
- tested_but_soft_failure.append(encoding_iana)
- logger.log(
- TRACE,
- "%s was excluded because of initial chaos probing. Gave up %i time(s). "
- "Computed mean chaos is %f %%.",
- encoding_iana,
- early_stop_count,
- round(mean_mess_ratio * 100, ndigits=3),
- )
- # Preparing those fallbacks in case we got nothing.
- if (
- encoding_iana in ["ascii", "utf_8", specified_encoding]
- and not lazy_str_hard_failure
- ):
- fallback_entry = CharsetMatch(
- sequences, encoding_iana, threshold, False, [], decoded_payload
- )
- if encoding_iana == specified_encoding:
- fallback_specified = fallback_entry
- elif encoding_iana == "ascii":
- fallback_ascii = fallback_entry
- else:
- fallback_u8 = fallback_entry
- continue
-
- logger.log(
- TRACE,
- "%s passed initial chaos probing. Mean measured chaos is %f %%",
- encoding_iana,
- round(mean_mess_ratio * 100, ndigits=3),
- )
-
- if not is_multi_byte_decoder:
- target_languages = encoding_languages(encoding_iana) # type: List[str]
- else:
- target_languages = mb_encoding_languages(encoding_iana)
-
- if target_languages:
- logger.log(
- TRACE,
- "{} should target any language(s) of {}".format(
- encoding_iana, str(target_languages)
- ),
- )
-
- cd_ratios = []
-
-        # We shall skip the CD when it's about ASCII.
-        # Most of the time it's not relevant to run "language-detection" on it.
- if encoding_iana != "ascii":
- for chunk in md_chunks:
- chunk_languages = coherence_ratio(
- chunk, 0.1, ",".join(target_languages) if target_languages else None
- )
-
- cd_ratios.append(chunk_languages)
-
- cd_ratios_merged = merge_coherence_ratios(cd_ratios)
-
- if cd_ratios_merged:
- logger.log(
- TRACE,
- "We detected language {} using {}".format(
- cd_ratios_merged, encoding_iana
- ),
- )
-
- results.append(
- CharsetMatch(
- sequences,
- encoding_iana,
- mean_mess_ratio,
- bom_or_sig_available,
- cd_ratios_merged,
- decoded_payload,
- )
- )
-
- if (
- encoding_iana in [specified_encoding, "ascii", "utf_8"]
- and mean_mess_ratio < 0.1
- ):
- logger.debug(
- "Encoding detection: %s is most likely the one.", encoding_iana
- )
- if explain:
- logger.removeHandler(explain_handler)
- logger.setLevel(previous_logger_level)
- return CharsetMatches([results[encoding_iana]])
-
- if encoding_iana == sig_encoding:
- logger.debug(
- "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
- "the beginning of the sequence.",
- encoding_iana,
- )
- if explain:
- logger.removeHandler(explain_handler)
- logger.setLevel(previous_logger_level)
- return CharsetMatches([results[encoding_iana]])
-
- if len(results) == 0:
- if fallback_u8 or fallback_ascii or fallback_specified:
- logger.log(
- TRACE,
- "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
- )
-
- if fallback_specified:
- logger.debug(
- "Encoding detection: %s will be used as a fallback match",
- fallback_specified.encoding,
- )
- results.append(fallback_specified)
- elif (
- (fallback_u8 and fallback_ascii is None)
- or (
- fallback_u8
- and fallback_ascii
- and fallback_u8.fingerprint != fallback_ascii.fingerprint
- )
- or (fallback_u8 is not None)
- ):
- logger.debug("Encoding detection: utf_8 will be used as a fallback match")
- results.append(fallback_u8)
- elif fallback_ascii:
- logger.debug("Encoding detection: ascii will be used as a fallback match")
- results.append(fallback_ascii)
-
- if results:
- logger.debug(
- "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
- results.best().encoding, # type: ignore
- len(results) - 1,
- )
- else:
- logger.debug("Encoding detection: Unable to determine any suitable charset.")
-
- if explain:
- logger.removeHandler(explain_handler)
- logger.setLevel(previous_logger_level)
-
- return results
-
-
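# [Editor's note] A minimal usage sketch for the from_bytes() API documented
# above, assuming the charset_normalizer 2.0.12 package is installed; the
# sample payload below is hypothetical.
from charset_normalizer import from_bytes

payload = "Bсеки човек има право на образование.".encode("cp1251")
results = from_bytes(payload, steps=5, chunk_size=512, threshold=0.2)
best_guess = results.best()  # a CharsetMatch, or None if nothing fits
if best_guess is not None:
    print(best_guess.encoding)  # expected: a cyrillic-capable code page
    print(str(best_guess))      # the decoded, unicode content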
-def from_fp(
- fp: BinaryIO,
- steps: int = 5,
- chunk_size: int = 512,
- threshold: float = 0.20,
- cp_isolation: List[str] = None,
- cp_exclusion: List[str] = None,
- preemptive_behaviour: bool = True,
- explain: bool = False,
-) -> CharsetMatches:
- """
-    Same as the function from_bytes, but using a file pointer that is already opened.
-    Will not close the file pointer.
- """
- return from_bytes(
- fp.read(),
- steps,
- chunk_size,
- threshold,
- cp_isolation,
- cp_exclusion,
- preemptive_behaviour,
- explain,
- )
-
-
-def from_path(
- path: PathLike,
- steps: int = 5,
- chunk_size: int = 512,
- threshold: float = 0.20,
- cp_isolation: List[str] = None,
- cp_exclusion: List[str] = None,
- preemptive_behaviour: bool = True,
- explain: bool = False,
-) -> CharsetMatches:
- """
-    Same as the function from_bytes, but with one extra step: opening and reading the given file path in binary mode.
-    Can raise IOError.
- """
- with open(path, "rb") as fp:
- return from_fp(
- fp,
- steps,
- chunk_size,
- threshold,
- cp_isolation,
- cp_exclusion,
- preemptive_behaviour,
- explain,
- )
-
-
-def normalize(
- path: PathLike,
- steps: int = 5,
- chunk_size: int = 512,
- threshold: float = 0.20,
- cp_isolation: List[str] = None,
- cp_exclusion: List[str] = None,
- preemptive_behaviour: bool = True,
-) -> CharsetMatch:
- """
- Take a (text-based) file path and try to create another file next to it, this time using UTF-8.
- """
- results = from_path(
- path,
- steps,
- chunk_size,
- threshold,
- cp_isolation,
- cp_exclusion,
- preemptive_behaviour,
- )
-
- filename = basename(path)
- target_extensions = list(splitext(filename))
-
- if len(results) == 0:
- raise IOError(
- 'Unable to normalize "{}", no encoding charset seems to fit.'.format(
- filename
- )
- )
-
- result = results.best()
-
- target_extensions[0] += "-" + result.encoding # type: ignore
-
- with open(
- "{}".format(str(path).replace(filename, "".join(target_extensions))), "wb"
- ) as fp:
- fp.write(result.output()) # type: ignore
-
- return result # type: ignore
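# [Editor's note] A hedged sketch of the path-based helpers above, assuming
# charset_normalizer 2.0.12 is installed; "./legacy.txt" is a hypothetical file.
from charset_normalizer import from_path, normalize

matches = from_path("./legacy.txt")  # opens and reads the file in binary mode
if matches:
    print(matches.best().encoding)

# normalize() writes a UTF-8 copy next to the source, suffixed with the detected
# encoding (e.g. ./legacy-cp1252.txt), and raises IOError when nothing fits.
best = normalize("./legacy.txt")
print(best.encoding)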
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/assets/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/assets/__init__.py
deleted file mode 100644
index b2e56ff3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/assets/__init__.py
+++ /dev/null
@@ -1,1244 +0,0 @@
-# -*- coding: utf_8 -*-
-from collections import OrderedDict
-
-FREQUENCIES = OrderedDict(
- [
- (
- "English",
- [
- "e",
- "a",
- "t",
- "i",
- "o",
- "n",
- "s",
- "r",
- "h",
- "l",
- "d",
- "c",
- "u",
- "m",
- "f",
- "p",
- "g",
- "w",
- "y",
- "b",
- "v",
- "k",
- "x",
- "j",
- "z",
- "q",
- ],
- ),
- (
- "German",
- [
- "e",
- "n",
- "i",
- "r",
- "s",
- "t",
- "a",
- "d",
- "h",
- "u",
- "l",
- "g",
- "o",
- "c",
- "m",
- "b",
- "f",
- "k",
- "w",
- "z",
- "p",
- "v",
- "ü",
- "ä",
- "ö",
- "j",
- ],
- ),
- (
- "French",
- [
- "e",
- "a",
- "s",
- "n",
- "i",
- "t",
- "r",
- "l",
- "u",
- "o",
- "d",
- "c",
- "p",
- "m",
- "é",
- "v",
- "g",
- "f",
- "b",
- "h",
- "q",
- "à",
- "x",
- "è",
- "y",
- "j",
- ],
- ),
- (
- "Dutch",
- [
- "e",
- "n",
- "a",
- "i",
- "r",
- "t",
- "o",
- "d",
- "s",
- "l",
- "g",
- "h",
- "v",
- "m",
- "u",
- "k",
- "c",
- "p",
- "b",
- "w",
- "j",
- "z",
- "f",
- "y",
- "x",
- "ë",
- ],
- ),
- (
- "Italian",
- [
- "e",
- "i",
- "a",
- "o",
- "n",
- "l",
- "t",
- "r",
- "s",
- "c",
- "d",
- "u",
- "p",
- "m",
- "g",
- "v",
- "f",
- "b",
- "z",
- "h",
- "q",
- "è",
- "à",
- "k",
- "y",
- "ò",
- ],
- ),
- (
- "Polish",
- [
- "a",
- "i",
- "o",
- "e",
- "n",
- "r",
- "z",
- "w",
- "s",
- "c",
- "t",
- "k",
- "y",
- "d",
- "p",
- "m",
- "u",
- "l",
- "j",
- "ł",
- "g",
- "b",
- "h",
- "ą",
- "ę",
- "ó",
- ],
- ),
- (
- "Spanish",
- [
- "e",
- "a",
- "o",
- "n",
- "s",
- "r",
- "i",
- "l",
- "d",
- "t",
- "c",
- "u",
- "m",
- "p",
- "b",
- "g",
- "v",
- "f",
- "y",
- "ó",
- "h",
- "q",
- "í",
- "j",
- "z",
- "á",
- ],
- ),
- (
- "Russian",
- [
- "о",
- "а",
- "е",
- "и",
- "н",
- "с",
- "т",
- "р",
- "в",
- "л",
- "к",
- "м",
- "д",
- "п",
- "у",
- "г",
- "я",
- "ы",
- "з",
- "б",
- "й",
- "ь",
- "ч",
- "х",
- "ж",
- "ц",
- ],
- ),
- (
- "Japanese",
- [
- "の",
- "に",
- "る",
- "た",
- "は",
- "ー",
- "と",
- "し",
- "を",
- "で",
- "て",
- "が",
- "い",
- "ン",
- "れ",
- "な",
- "年",
- "ス",
- "っ",
- "ル",
- "か",
- "ら",
- "あ",
- "さ",
- "も",
- "り",
- ],
- ),
- (
- "Portuguese",
- [
- "a",
- "e",
- "o",
- "s",
- "i",
- "r",
- "d",
- "n",
- "t",
- "m",
- "u",
- "c",
- "l",
- "p",
- "g",
- "v",
- "b",
- "f",
- "h",
- "ã",
- "q",
- "é",
- "ç",
- "á",
- "z",
- "í",
- ],
- ),
- (
- "Swedish",
- [
- "e",
- "a",
- "n",
- "r",
- "t",
- "s",
- "i",
- "l",
- "d",
- "o",
- "m",
- "k",
- "g",
- "v",
- "h",
- "f",
- "u",
- "p",
- "ä",
- "c",
- "b",
- "ö",
- "å",
- "y",
- "j",
- "x",
- ],
- ),
- (
- "Chinese",
- [
- "的",
- "一",
- "是",
- "不",
- "了",
- "在",
- "人",
- "有",
- "我",
- "他",
- "这",
- "个",
- "们",
- "中",
- "来",
- "上",
- "大",
- "为",
- "和",
- "国",
- "地",
- "到",
- "以",
- "说",
- "时",
- "要",
- "就",
- "出",
- "会",
- ],
- ),
- (
- "Ukrainian",
- [
- "о",
- "а",
- "н",
- "і",
- "и",
- "р",
- "в",
- "т",
- "е",
- "с",
- "к",
- "л",
- "у",
- "д",
- "м",
- "п",
- "з",
- "я",
- "ь",
- "б",
- "г",
- "й",
- "ч",
- "х",
- "ц",
- "ї",
- ],
- ),
- (
- "Norwegian",
- [
- "e",
- "r",
- "n",
- "t",
- "a",
- "s",
- "i",
- "o",
- "l",
- "d",
- "g",
- "k",
- "m",
- "v",
- "f",
- "p",
- "u",
- "b",
- "h",
- "å",
- "y",
- "j",
- "ø",
- "c",
- "æ",
- "w",
- ],
- ),
- (
- "Finnish",
- [
- "a",
- "i",
- "n",
- "t",
- "e",
- "s",
- "l",
- "o",
- "u",
- "k",
- "ä",
- "m",
- "r",
- "v",
- "j",
- "h",
- "p",
- "y",
- "d",
- "ö",
- "g",
- "c",
- "b",
- "f",
- "w",
- "z",
- ],
- ),
- (
- "Vietnamese",
- [
- "n",
- "h",
- "t",
- "i",
- "c",
- "g",
- "a",
- "o",
- "u",
- "m",
- "l",
- "r",
- "à",
- "đ",
- "s",
- "e",
- "v",
- "p",
- "b",
- "y",
- "ư",
- "d",
- "á",
- "k",
- "ộ",
- "ế",
- ],
- ),
- (
- "Czech",
- [
- "o",
- "e",
- "a",
- "n",
- "t",
- "s",
- "i",
- "l",
- "v",
- "r",
- "k",
- "d",
- "u",
- "m",
- "p",
- "í",
- "c",
- "h",
- "z",
- "á",
- "y",
- "j",
- "b",
- "ě",
- "é",
- "ř",
- ],
- ),
- (
- "Hungarian",
- [
- "e",
- "a",
- "t",
- "l",
- "s",
- "n",
- "k",
- "r",
- "i",
- "o",
- "z",
- "á",
- "é",
- "g",
- "m",
- "b",
- "y",
- "v",
- "d",
- "h",
- "u",
- "p",
- "j",
- "ö",
- "f",
- "c",
- ],
- ),
- (
- "Korean",
- [
- "이",
- "다",
- "에",
- "의",
- "는",
- "로",
- "하",
- "을",
- "가",
- "고",
- "지",
- "서",
- "한",
- "은",
- "기",
- "으",
- "년",
- "대",
- "사",
- "시",
- "를",
- "리",
- "도",
- "인",
- "스",
- "일",
- ],
- ),
- (
- "Indonesian",
- [
- "a",
- "n",
- "e",
- "i",
- "r",
- "t",
- "u",
- "s",
- "d",
- "k",
- "m",
- "l",
- "g",
- "p",
- "b",
- "o",
- "h",
- "y",
- "j",
- "c",
- "w",
- "f",
- "v",
- "z",
- "x",
- "q",
- ],
- ),
- (
- "Turkish",
- [
- "a",
- "e",
- "i",
- "n",
- "r",
- "l",
- "ı",
- "k",
- "d",
- "t",
- "s",
- "m",
- "y",
- "u",
- "o",
- "b",
- "ü",
- "ş",
- "v",
- "g",
- "z",
- "h",
- "c",
- "p",
- "ç",
- "ğ",
- ],
- ),
- (
- "Romanian",
- [
- "e",
- "i",
- "a",
- "r",
- "n",
- "t",
- "u",
- "l",
- "o",
- "c",
- "s",
- "d",
- "p",
- "m",
- "ă",
- "f",
- "v",
- "î",
- "g",
- "b",
- "ș",
- "ț",
- "z",
- "h",
- "â",
- "j",
- ],
- ),
- (
- "Farsi",
- [
- "ا",
- "ی",
- "ر",
- "د",
- "ن",
- "ه",
- "و",
- "م",
- "ت",
- "ب",
- "س",
- "ل",
- "ک",
- "ش",
- "ز",
- "ف",
- "گ",
- "ع",
- "خ",
- "ق",
- "ج",
- "آ",
- "پ",
- "ح",
- "ط",
- "ص",
- ],
- ),
- (
- "Arabic",
- [
- "ا",
- "ل",
- "ي",
- "م",
- "و",
- "ن",
- "ر",
- "ت",
- "ب",
- "ة",
- "ع",
- "د",
- "س",
- "ف",
- "ه",
- "ك",
- "ق",
- "أ",
- "ح",
- "ج",
- "ش",
- "ط",
- "ص",
- "ى",
- "خ",
- "إ",
- ],
- ),
- (
- "Danish",
- [
- "e",
- "r",
- "n",
- "t",
- "a",
- "i",
- "s",
- "d",
- "l",
- "o",
- "g",
- "m",
- "k",
- "f",
- "v",
- "u",
- "b",
- "h",
- "p",
- "å",
- "y",
- "ø",
- "æ",
- "c",
- "j",
- "w",
- ],
- ),
- (
- "Serbian",
- [
- "а",
- "и",
- "о",
- "е",
- "н",
- "р",
- "с",
- "у",
- "т",
- "к",
- "ј",
- "в",
- "д",
- "м",
- "п",
- "л",
- "г",
- "з",
- "б",
- "a",
- "i",
- "e",
- "o",
- "n",
- "ц",
- "ш",
- ],
- ),
- (
- "Lithuanian",
- [
- "i",
- "a",
- "s",
- "o",
- "r",
- "e",
- "t",
- "n",
- "u",
- "k",
- "m",
- "l",
- "p",
- "v",
- "d",
- "j",
- "g",
- "ė",
- "b",
- "y",
- "ų",
- "š",
- "ž",
- "c",
- "ą",
- "į",
- ],
- ),
- (
- "Slovene",
- [
- "e",
- "a",
- "i",
- "o",
- "n",
- "r",
- "s",
- "l",
- "t",
- "j",
- "v",
- "k",
- "d",
- "p",
- "m",
- "u",
- "z",
- "b",
- "g",
- "h",
- "č",
- "c",
- "š",
- "ž",
- "f",
- "y",
- ],
- ),
- (
- "Slovak",
- [
- "o",
- "a",
- "e",
- "n",
- "i",
- "r",
- "v",
- "t",
- "s",
- "l",
- "k",
- "d",
- "m",
- "p",
- "u",
- "c",
- "h",
- "j",
- "b",
- "z",
- "á",
- "y",
- "ý",
- "í",
- "č",
- "é",
- ],
- ),
- (
- "Hebrew",
- [
- "י",
- "ו",
- "ה",
- "ל",
- "ר",
- "ב",
- "ת",
- "מ",
- "א",
- "ש",
- "נ",
- "ע",
- "ם",
- "ד",
- "ק",
- "ח",
- "פ",
- "ס",
- "כ",
- "ג",
- "ט",
- "צ",
- "ן",
- "ז",
- "ך",
- ],
- ),
- (
- "Bulgarian",
- [
- "а",
- "и",
- "о",
- "е",
- "н",
- "т",
- "р",
- "с",
- "в",
- "л",
- "к",
- "д",
- "п",
- "м",
- "з",
- "г",
- "я",
- "ъ",
- "у",
- "б",
- "ч",
- "ц",
- "й",
- "ж",
- "щ",
- "х",
- ],
- ),
- (
- "Croatian",
- [
- "a",
- "i",
- "o",
- "e",
- "n",
- "r",
- "j",
- "s",
- "t",
- "u",
- "k",
- "l",
- "v",
- "d",
- "m",
- "p",
- "g",
- "z",
- "b",
- "c",
- "č",
- "h",
- "š",
- "ž",
- "ć",
- "f",
- ],
- ),
- (
- "Hindi",
- [
- "क",
- "र",
- "स",
- "न",
- "त",
- "म",
- "ह",
- "प",
- "य",
- "ल",
- "व",
- "ज",
- "द",
- "ग",
- "ब",
- "श",
- "ट",
- "अ",
- "ए",
- "थ",
- "भ",
- "ड",
- "च",
- "ध",
- "ष",
- "इ",
- ],
- ),
- (
- "Estonian",
- [
- "a",
- "i",
- "e",
- "s",
- "t",
- "l",
- "u",
- "n",
- "o",
- "k",
- "r",
- "d",
- "m",
- "v",
- "g",
- "p",
- "j",
- "h",
- "ä",
- "b",
- "õ",
- "ü",
- "f",
- "c",
- "ö",
- "y",
- ],
- ),
- (
- "Simple English",
- [
- "e",
- "a",
- "t",
- "i",
- "o",
- "n",
- "s",
- "r",
- "h",
- "l",
- "d",
- "c",
- "m",
- "u",
- "f",
- "p",
- "g",
- "w",
- "b",
- "y",
- "v",
- "k",
- "j",
- "x",
- "z",
- "q",
- ],
- ),
- (
- "Thai",
- [
- "า",
- "น",
- "ร",
- "อ",
- "ก",
- "เ",
- "ง",
- "ม",
- "ย",
- "ล",
- "ว",
- "ด",
- "ท",
- "ส",
- "ต",
- "ะ",
- "ป",
- "บ",
- "ค",
- "ห",
- "แ",
- "จ",
- "พ",
- "ช",
- "ข",
- "ใ",
- ],
- ),
- (
- "Greek",
- [
- "α",
- "τ",
- "ο",
- "ι",
- "ε",
- "ν",
- "ρ",
- "σ",
- "κ",
- "η",
- "π",
- "ς",
- "υ",
- "μ",
- "λ",
- "ί",
- "ό",
- "ά",
- "γ",
- "έ",
- "δ",
- "ή",
- "ω",
- "χ",
- "θ",
- "ύ",
- ],
- ),
- (
- "Tamil",
- [
- "க",
- "த",
- "ப",
- "ட",
- "ர",
- "ம",
- "ல",
- "ன",
- "வ",
- "ற",
- "ய",
- "ள",
- "ச",
- "ந",
- "இ",
- "ண",
- "அ",
- "ஆ",
- "ழ",
- "ங",
- "எ",
- "உ",
- "ஒ",
- "ஸ",
- ],
- ),
- (
- "Classical Chinese",
- [
- "之",
- "年",
- "為",
- "也",
- "以",
- "一",
- "人",
- "其",
- "者",
- "國",
- "有",
- "二",
- "十",
- "於",
- "曰",
- "三",
- "不",
- "大",
- "而",
- "子",
- "中",
- "五",
- "四",
- ],
- ),
- (
- "Kazakh",
- [
- "а",
- "ы",
- "е",
- "н",
- "т",
- "р",
- "л",
- "і",
- "д",
- "с",
- "м",
- "қ",
- "к",
- "о",
- "б",
- "и",
- "у",
- "ғ",
- "ж",
- "ң",
- "з",
- "ш",
- "й",
- "п",
- "г",
- "ө",
- ],
- ),
- ]
-)
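# [Editor's note] FREQUENCIES maps each supported language to its letters,
# ordered from most to least frequent. A small probe of the structure above;
# the import assumes the package is installed.
from charset_normalizer.assets import FREQUENCIES

print(len(FREQUENCIES))            # number of supported languages
print(FREQUENCIES["English"][:5])  # ['e', 'a', 't', 'i', 'o'] - most common first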
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/cd.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/cd.py
deleted file mode 100644
index 8429a0eb..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/cd.py
+++ /dev/null
@@ -1,340 +0,0 @@
-import importlib
-from codecs import IncrementalDecoder
-from collections import Counter, OrderedDict
-from functools import lru_cache
-from typing import Dict, List, Optional, Tuple
-
-from .assets import FREQUENCIES
-from .constant import KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES
-from .md import is_suspiciously_successive_range
-from .models import CoherenceMatches
-from .utils import (
- is_accentuated,
- is_latin,
- is_multi_byte_encoding,
- is_unicode_range_secondary,
- unicode_range,
-)
-
-
-def encoding_unicode_range(iana_name: str) -> List[str]:
- """
-    Return the Unicode ranges associated with a single-byte code page.
- """
- if is_multi_byte_encoding(iana_name):
- raise IOError("Function not supported on multi-byte code page")
-
- decoder = importlib.import_module("encodings.{}".format(iana_name)).IncrementalDecoder # type: ignore
-
- p = decoder(errors="ignore") # type: IncrementalDecoder
- seen_ranges = {} # type: Dict[str, int]
- character_count = 0 # type: int
-
- for i in range(0x40, 0xFF):
- chunk = p.decode(bytes([i])) # type: str
-
- if chunk:
- character_range = unicode_range(chunk) # type: Optional[str]
-
- if character_range is None:
- continue
-
- if is_unicode_range_secondary(character_range) is False:
- if character_range not in seen_ranges:
- seen_ranges[character_range] = 0
- seen_ranges[character_range] += 1
- character_count += 1
-
- return sorted(
- [
- character_range
- for character_range in seen_ranges
- if seen_ranges[character_range] / character_count >= 0.15
- ]
- )
-
-
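# [Editor's note] An indicative probe of encoding_unicode_range() above: it
# decodes bytes 0x40-0xFF through the target codec and keeps the dominant
# non-secondary Unicode ranges. Output may vary with the installed codecs.
from charset_normalizer.cd import encoding_unicode_range

print(encoding_unicode_range("cp1251"))  # e.g. ['Basic Latin', 'Cyrillic']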
-def unicode_range_languages(primary_range: str) -> List[str]:
- """
- Return inferred languages used with a unicode range.
- """
- languages = [] # type: List[str]
-
- for language, characters in FREQUENCIES.items():
- for character in characters:
- if unicode_range(character) == primary_range:
- languages.append(language)
- break
-
- return languages
-
-
-@lru_cache()
-def encoding_languages(iana_name: str) -> List[str]:
- """
-    Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
-    This function does the correspondence.
- """
- unicode_ranges = encoding_unicode_range(iana_name) # type: List[str]
- primary_range = None # type: Optional[str]
-
- for specified_range in unicode_ranges:
- if "Latin" not in specified_range:
- primary_range = specified_range
- break
-
- if primary_range is None:
- return ["Latin Based"]
-
- return unicode_range_languages(primary_range)
-
-
-@lru_cache()
-def mb_encoding_languages(iana_name: str) -> List[str]:
- """
-    Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
-    This function does the correspondence.
- """
- if (
- iana_name.startswith("shift_")
- or iana_name.startswith("iso2022_jp")
- or iana_name.startswith("euc_j")
- or iana_name == "cp932"
- ):
- return ["Japanese"]
- if iana_name.startswith("gb") or iana_name in ZH_NAMES:
- return ["Chinese", "Classical Chinese"]
- if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
- return ["Korean"]
-
- return []
-
-
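# [Editor's note] An illustrative probe of the two association helpers above,
# assuming charset_normalizer 2.0.12; results follow the rules shown in the code.
from charset_normalizer.cd import encoding_languages, mb_encoding_languages

print(mb_encoding_languages("cp932"))    # ['Japanese']
print(mb_encoding_languages("gb18030"))  # ['Chinese', 'Classical Chinese']
print(encoding_languages("cp1251"))      # cyrillic-using languages, e.g. Russian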
-@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
-def get_target_features(language: str) -> Tuple[bool, bool]:
- """
-    Determine the main aspects of a supported language: whether it contains accents and whether it is pure Latin.
- """
- target_have_accents = False # type: bool
- target_pure_latin = True # type: bool
-
- for character in FREQUENCIES[language]:
- if not target_have_accents and is_accentuated(character):
- target_have_accents = True
- if target_pure_latin and is_latin(character) is False:
- target_pure_latin = False
-
- return target_have_accents, target_pure_latin
-
-
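# [Editor's note] Probing get_target_features() above: English has no accents
# and is pure Latin, while French keeps Latin letters but adds accents (é, à, è).
from charset_normalizer.cd import get_target_features

print(get_target_features("English"))  # (False, True)
print(get_target_features("French"))   # (True, True)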
-def alphabet_languages(
- characters: List[str], ignore_non_latin: bool = False
-) -> List[str]:
- """
-    Return the languages associated with the given characters.
- """
- languages = [] # type: List[Tuple[str, float]]
-
- source_have_accents = any(is_accentuated(character) for character in characters)
-
- for language, language_characters in FREQUENCIES.items():
-
- target_have_accents, target_pure_latin = get_target_features(language)
-
- if ignore_non_latin and target_pure_latin is False:
- continue
-
- if target_have_accents is False and source_have_accents:
- continue
-
- character_count = len(language_characters) # type: int
-
- character_match_count = len(
- [c for c in language_characters if c in characters]
- ) # type: int
-
- ratio = character_match_count / character_count # type: float
-
- if ratio >= 0.2:
- languages.append((language, ratio))
-
- languages = sorted(languages, key=lambda x: x[1], reverse=True)
-
- return [compatible_language[0] for compatible_language in languages]
-
-
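# [Editor's note] alphabet_languages() above ranks candidate languages by how
# many of their frequent letters appear in the input; an indicative probe:
from charset_normalizer.cd import alphabet_languages

print(alphabet_languages(list("etaoinshrdlu")))  # English-like alphabets first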
-def characters_popularity_compare(
- language: str, ordered_characters: List[str]
-) -> float:
- """
-    Determine if an ordered character list (by occurrence, from most frequent to rarest) matches a particular language.
-    The result is a ratio between 0. (absolutely no correspondence) and 1. (near-perfect fit).
-    Beware that this function is not strict on the match, in order to ease detection. (Meaning a close match is 1.)
- """
- if language not in FREQUENCIES:
- raise ValueError("{} not available".format(language))
-
- character_approved_count = 0 # type: int
-
- for character in ordered_characters:
- if character not in FREQUENCIES[language]:
- continue
-
- characters_before_source = FREQUENCIES[language][
- 0 : FREQUENCIES[language].index(character)
- ] # type: List[str]
- characters_after_source = FREQUENCIES[language][
- FREQUENCIES[language].index(character) :
- ] # type: List[str]
-
- characters_before = ordered_characters[
- 0 : ordered_characters.index(character)
- ] # type: List[str]
- characters_after = ordered_characters[
- ordered_characters.index(character) :
- ] # type: List[str]
-
- before_match_count = [
- e in characters_before for e in characters_before_source
- ].count(
- True
- ) # type: int
- after_match_count = [
- e in characters_after for e in characters_after_source
- ].count(
- True
- ) # type: int
-
- if len(characters_before_source) == 0 and before_match_count <= 4:
- character_approved_count += 1
- continue
-
- if len(characters_after_source) == 0 and after_match_count <= 4:
- character_approved_count += 1
- continue
-
- if (
- before_match_count / len(characters_before_source) >= 0.4
- or after_match_count / len(characters_after_source) >= 0.4
- ):
- character_approved_count += 1
- continue
-
- return character_approved_count / len(ordered_characters)
-
-
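# [Editor's note] A worked example of characters_popularity_compare() defined
# above: letters fed in near-English frequency order score close to 1.0, while
# the reversed order scores noticeably lower. Exact ratios are indicative only.
from charset_normalizer.cd import characters_popularity_compare

english_like = ["e", "a", "t", "i", "o", "n", "s", "r", "h", "l"]
print(characters_popularity_compare("English", english_like))        # ~1.0
print(characters_popularity_compare("English", english_like[::-1]))  # lower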
-def alpha_unicode_split(decoded_sequence: str) -> List[str]:
- """
- Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
-    Ex. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list;
-    one containing the Latin letters and the other the Hebrew ones.
- """
- layers = OrderedDict() # type: Dict[str, str]
-
- for character in decoded_sequence:
- if character.isalpha() is False:
- continue
-
- character_range = unicode_range(character) # type: Optional[str]
-
- if character_range is None:
- continue
-
- layer_target_range = None # type: Optional[str]
-
- for discovered_range in layers:
- if (
- is_suspiciously_successive_range(discovered_range, character_range)
- is False
- ):
- layer_target_range = discovered_range
- break
-
- if layer_target_range is None:
- layer_target_range = character_range
-
- if layer_target_range not in layers:
- layers[layer_target_range] = character.lower()
- continue
-
- layers[layer_target_range] += character.lower()
-
- return list(layers.values())
-
-
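# [Editor's note] A small demonstration of the layer split described in the
# docstring above: mixed Latin and Hebrew text yields one lowercased layer per
# alphabet, with non-alphabetic characters dropped.
from charset_normalizer.cd import alpha_unicode_split

print(alpha_unicode_split("Hello שלום world!"))  # e.g. ['helloworld', 'שלום']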
-def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
- """
-    This function merges results previously given by the function coherence_ratio.
-    The return type is the same as that of coherence_ratio.
- """
- per_language_ratios = OrderedDict() # type: Dict[str, List[float]]
- for result in results:
- for sub_result in result:
- language, ratio = sub_result
- if language not in per_language_ratios:
- per_language_ratios[language] = [ratio]
- continue
- per_language_ratios[language].append(ratio)
-
- merge = [
- (
- language,
- round(
- sum(per_language_ratios[language]) / len(per_language_ratios[language]),
- 4,
- ),
- )
- for language in per_language_ratios
- ]
-
- return sorted(merge, key=lambda x: x[1], reverse=True)
-
-
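# [Editor's note] merge_coherence_ratios() above averages per-language ratios
# across chunks and re-sorts them; a hypothetical two-chunk input for shape:
from charset_normalizer.cd import merge_coherence_ratios

chunk_a = [("English", 0.8), ("French", 0.4)]
chunk_b = [("English", 0.6)]
print(merge_coherence_ratios([chunk_a, chunk_b]))
# [('English', 0.7), ('French', 0.4)]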
-@lru_cache(maxsize=2048)
-def coherence_ratio(
- decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
-) -> CoherenceMatches:
- """
-    Detect ANY language that can be identified in the given sequence. The sequence will be analysed by layers.
-    A layer = character extraction by alphabets/ranges.
- """
-
- results = [] # type: List[Tuple[str, float]]
- ignore_non_latin = False # type: bool
-
- sufficient_match_count = 0 # type: int
-
- lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
- if "Latin Based" in lg_inclusion_list:
- ignore_non_latin = True
- lg_inclusion_list.remove("Latin Based")
-
- for layer in alpha_unicode_split(decoded_sequence):
- sequence_frequencies = Counter(layer) # type: Counter
- most_common = sequence_frequencies.most_common()
-
- character_count = sum(o for c, o in most_common) # type: int
-
- if character_count <= TOO_SMALL_SEQUENCE:
- continue
-
- popular_character_ordered = [c for c, o in most_common] # type: List[str]
-
- for language in lg_inclusion_list or alphabet_languages(
- popular_character_ordered, ignore_non_latin
- ):
- ratio = characters_popularity_compare(
- language, popular_character_ordered
- ) # type: float
-
- if ratio < threshold:
- continue
- elif ratio >= 0.8:
- sufficient_match_count += 1
-
- results.append((language, round(ratio, 4)))
-
- if sufficient_match_count >= 3:
- break
-
- return sorted(results, key=lambda x: x[1], reverse=True)
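# [Editor's note] A hedged end-to-end probe of coherence_ratio() above; the
# exact ratios depend on the frequency tables, so treat the values as indicative.
from charset_normalizer.cd import coherence_ratio

print(coherence_ratio("Bсеки човек има право на образование и развитие"))
# e.g. [('Bulgarian', 0.99...), ('Russian', 0.98...), ...] - best match first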
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/cli/normalizer.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/cli/normalizer.py
deleted file mode 100644
index 5f912c92..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/cli/normalizer.py
+++ /dev/null
@@ -1,290 +0,0 @@
-import argparse
-import sys
-from json import dumps
-from os.path import abspath
-from platform import python_version
-from typing import List
-
-from charset_normalizer import from_fp
-from charset_normalizer.models import CliDetectionResult
-from charset_normalizer.version import __version__
-
-
-def query_yes_no(question: str, default: str = "yes") -> bool:
- """Ask a yes/no question via input() and return their answer.
-
- "question" is a string that is presented to the user.
-    "default" is the presumed answer if the user just hits <Enter>.
- It must be "yes" (the default), "no" or None (meaning
- an answer is required of the user).
-
- The "answer" return value is True for "yes" or False for "no".
-
- Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
- """
- valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
- if default is None:
- prompt = " [y/n] "
- elif default == "yes":
- prompt = " [Y/n] "
- elif default == "no":
- prompt = " [y/N] "
- else:
- raise ValueError("invalid default answer: '%s'" % default)
-
- while True:
- sys.stdout.write(question + prompt)
- choice = input().lower()
- if default is not None and choice == "":
- return valid[default]
- elif choice in valid:
- return valid[choice]
- else:
- sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
-
-
-def cli_detect(argv: List[str] = None) -> int:
- """
- CLI assistant using ARGV and ArgumentParser
- :param argv:
-    :return: 0 if everything is fine; anything else equals trouble
- """
- parser = argparse.ArgumentParser(
-        description="The Real First Universal Charset Detector. "
-        "Discover the originating encoding used on a text file. "
-        "Normalize text to Unicode."
- )
-
- parser.add_argument(
- "files", type=argparse.FileType("rb"), nargs="+", help="File(s) to be analysed"
- )
- parser.add_argument(
- "-v",
- "--verbose",
- action="store_true",
- default=False,
- dest="verbose",
-        help="Display complementary information about the file, if any. "
-        "Stdout will contain logs about the detection process.",
- )
- parser.add_argument(
- "-a",
- "--with-alternative",
- action="store_true",
- default=False,
- dest="alternatives",
- help="Output complementary possibilities if any. Top-level JSON WILL be a list.",
- )
- parser.add_argument(
- "-n",
- "--normalize",
- action="store_true",
- default=False,
- dest="normalize",
-        help="Permit normalizing the input file. If not set, the program does not write anything.",
- )
- parser.add_argument(
- "-m",
- "--minimal",
- action="store_true",
- default=False,
- dest="minimal",
-        help="Only output the detected charset to STDOUT, disabling JSON output.",
- )
- parser.add_argument(
- "-r",
- "--replace",
- action="store_true",
- default=False,
- dest="replace",
-        help="Replace the file when normalizing it instead of creating a new one.",
- )
- parser.add_argument(
- "-f",
- "--force",
- action="store_true",
- default=False,
- dest="force",
-        help="Replace the file without asking if you are sure; use this flag with caution.",
- )
- parser.add_argument(
- "-t",
- "--threshold",
- action="store",
- default=0.1,
- type=float,
- dest="threshold",
- help="Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1.",
- )
- parser.add_argument(
- "--version",
- action="version",
- version="Charset-Normalizer {} - Python {}".format(
- __version__, python_version()
- ),
- help="Show version information and exit.",
- )
-
- args = parser.parse_args(argv)
-
- if args.replace is True and args.normalize is False:
-        print("Use --replace only in addition to --normalize.", file=sys.stderr)
- return 1
-
- if args.force is True and args.replace is False:
-        print("Use --force only in addition to --replace.", file=sys.stderr)
- return 1
-
- if args.threshold < 0.0 or args.threshold > 1.0:
- print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr)
- return 1
-
- x_ = []
-
- for my_file in args.files:
-
- matches = from_fp(my_file, threshold=args.threshold, explain=args.verbose)
-
- best_guess = matches.best()
-
- if best_guess is None:
- print(
- 'Unable to identify originating encoding for "{}". {}'.format(
- my_file.name,
-                "Maybe try increasing the maximum amount of chaos."
- if args.threshold < 1.0
- else "",
- ),
- file=sys.stderr,
- )
- x_.append(
- CliDetectionResult(
- abspath(my_file.name),
- None,
- [],
- [],
- "Unknown",
- [],
- False,
- 1.0,
- 0.0,
- None,
- True,
- )
- )
- else:
- x_.append(
- CliDetectionResult(
- abspath(my_file.name),
- best_guess.encoding,
- best_guess.encoding_aliases,
- [
- cp
- for cp in best_guess.could_be_from_charset
- if cp != best_guess.encoding
- ],
- best_guess.language,
- best_guess.alphabets,
- best_guess.bom,
- best_guess.percent_chaos,
- best_guess.percent_coherence,
- None,
- True,
- )
- )
-
- if len(matches) > 1 and args.alternatives:
- for el in matches:
- if el != best_guess:
- x_.append(
- CliDetectionResult(
- abspath(my_file.name),
- el.encoding,
- el.encoding_aliases,
- [
- cp
- for cp in el.could_be_from_charset
- if cp != el.encoding
- ],
- el.language,
- el.alphabets,
- el.bom,
- el.percent_chaos,
- el.percent_coherence,
- None,
- False,
- )
- )
-
- if args.normalize is True:
-
- if best_guess.encoding.startswith("utf") is True:
- print(
-                    '"{}" file does not need to be normalized, as it is already Unicode.'.format(
- my_file.name
- ),
- file=sys.stderr,
- )
- if my_file.closed is False:
- my_file.close()
- continue
-
- o_ = my_file.name.split(".") # type: List[str]
-
- if args.replace is False:
- o_.insert(-1, best_guess.encoding)
- if my_file.closed is False:
- my_file.close()
- elif (
- args.force is False
- and query_yes_no(
-                'Are you sure you want to normalize "{}" by replacing it?'.format(
- my_file.name
- ),
- "no",
- )
- is False
- ):
- if my_file.closed is False:
- my_file.close()
- continue
-
- try:
- x_[0].unicode_path = abspath("./{}".format(".".join(o_)))
-
- with open(x_[0].unicode_path, "w", encoding="utf-8") as fp:
- fp.write(str(best_guess))
- except IOError as e:
- print(str(e), file=sys.stderr)
- if my_file.closed is False:
- my_file.close()
- return 2
-
- if my_file.closed is False:
- my_file.close()
-
- if args.minimal is False:
- print(
- dumps(
- [el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__,
- ensure_ascii=True,
- indent=4,
- )
- )
- else:
- for my_file in args.files:
- print(
- ", ".join(
- [
- el.encoding or "undefined"
- for el in x_
- if el.path == abspath(my_file.name)
- ]
- )
- )
-
- return 0
-
-
-if __name__ == "__main__":
- cli_detect()
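# [Editor's note] A sketch of invoking the CLI entry point above
# programmatically, mirroring the `normalizer` console script declared in
# entry_points.txt; the file path is hypothetical.
from charset_normalizer.cli.normalizer import cli_detect

# equivalent to running: normalizer --minimal ./legacy.txt
exit_code = cli_detect(["--minimal", "./legacy.txt"])
print(exit_code)  # 0 means everything went fine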
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/constant.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/constant.py
deleted file mode 100644
index c32f5cf2..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/constant.py
+++ /dev/null
@@ -1,503 +0,0 @@
-from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
-from collections import OrderedDict
-from encodings.aliases import aliases
-from re import IGNORECASE, compile as re_compile
-from typing import Dict, List, Set, Union
-
-from .assets import FREQUENCIES
-
-# Contains, for each eligible encoding, its SIG/BOM bytes (a single value or a list of items)
-ENCODING_MARKS = OrderedDict(
- [
- ("utf_8", BOM_UTF8),
- (
- "utf_7",
- [
- b"\x2b\x2f\x76\x38",
- b"\x2b\x2f\x76\x39",
- b"\x2b\x2f\x76\x2b",
- b"\x2b\x2f\x76\x2f",
- b"\x2b\x2f\x76\x38\x2d",
- ],
- ),
- ("gb18030", b"\x84\x31\x95\x33"),
- ("utf_32", [BOM_UTF32_BE, BOM_UTF32_LE]),
- ("utf_16", [BOM_UTF16_BE, BOM_UTF16_LE]),
- ]
-) # type: Dict[str, Union[bytes, List[bytes]]]
-
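# [Editor's note] A minimal sketch of how a SIG/BOM lookup over ENCODING_MARKS
# can work; the real implementation is charset_normalizer.utils.identify_sig_or_bom.
from charset_normalizer.constant import ENCODING_MARKS

def starts_with_mark(sequence, marks):
    # marks is either a single bytes value or a list of candidate byte prefixes
    candidates = marks if isinstance(marks, list) else [marks]
    return any(sequence.startswith(candidate) for candidate in candidates)

print(starts_with_mark(b"\xef\xbb\xbfhello", ENCODING_MARKS["utf_8"]))  # True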
-TOO_SMALL_SEQUENCE = 32 # type: int
-TOO_BIG_SEQUENCE = int(10e6) # type: int
-
-UTF8_MAXIMAL_ALLOCATION = 1112064 # type: int
-
-UNICODE_RANGES_COMBINED = {
- "Control character": range(31 + 1),
- "Basic Latin": range(32, 127 + 1),
- "Latin-1 Supplement": range(128, 255 + 1),
- "Latin Extended-A": range(256, 383 + 1),
- "Latin Extended-B": range(384, 591 + 1),
- "IPA Extensions": range(592, 687 + 1),
- "Spacing Modifier Letters": range(688, 767 + 1),
- "Combining Diacritical Marks": range(768, 879 + 1),
- "Greek and Coptic": range(880, 1023 + 1),
- "Cyrillic": range(1024, 1279 + 1),
- "Cyrillic Supplement": range(1280, 1327 + 1),
- "Armenian": range(1328, 1423 + 1),
- "Hebrew": range(1424, 1535 + 1),
- "Arabic": range(1536, 1791 + 1),
- "Syriac": range(1792, 1871 + 1),
- "Arabic Supplement": range(1872, 1919 + 1),
- "Thaana": range(1920, 1983 + 1),
- "NKo": range(1984, 2047 + 1),
- "Samaritan": range(2048, 2111 + 1),
- "Mandaic": range(2112, 2143 + 1),
- "Syriac Supplement": range(2144, 2159 + 1),
- "Arabic Extended-A": range(2208, 2303 + 1),
- "Devanagari": range(2304, 2431 + 1),
- "Bengali": range(2432, 2559 + 1),
- "Gurmukhi": range(2560, 2687 + 1),
- "Gujarati": range(2688, 2815 + 1),
- "Oriya": range(2816, 2943 + 1),
- "Tamil": range(2944, 3071 + 1),
- "Telugu": range(3072, 3199 + 1),
- "Kannada": range(3200, 3327 + 1),
- "Malayalam": range(3328, 3455 + 1),
- "Sinhala": range(3456, 3583 + 1),
- "Thai": range(3584, 3711 + 1),
- "Lao": range(3712, 3839 + 1),
- "Tibetan": range(3840, 4095 + 1),
- "Myanmar": range(4096, 4255 + 1),
- "Georgian": range(4256, 4351 + 1),
- "Hangul Jamo": range(4352, 4607 + 1),
- "Ethiopic": range(4608, 4991 + 1),
- "Ethiopic Supplement": range(4992, 5023 + 1),
- "Cherokee": range(5024, 5119 + 1),
- "Unified Canadian Aboriginal Syllabics": range(5120, 5759 + 1),
- "Ogham": range(5760, 5791 + 1),
- "Runic": range(5792, 5887 + 1),
- "Tagalog": range(5888, 5919 + 1),
- "Hanunoo": range(5920, 5951 + 1),
- "Buhid": range(5952, 5983 + 1),
- "Tagbanwa": range(5984, 6015 + 1),
- "Khmer": range(6016, 6143 + 1),
- "Mongolian": range(6144, 6319 + 1),
- "Unified Canadian Aboriginal Syllabics Extended": range(6320, 6399 + 1),
- "Limbu": range(6400, 6479 + 1),
- "Tai Le": range(6480, 6527 + 1),
- "New Tai Lue": range(6528, 6623 + 1),
- "Khmer Symbols": range(6624, 6655 + 1),
- "Buginese": range(6656, 6687 + 1),
- "Tai Tham": range(6688, 6831 + 1),
- "Combining Diacritical Marks Extended": range(6832, 6911 + 1),
- "Balinese": range(6912, 7039 + 1),
- "Sundanese": range(7040, 7103 + 1),
- "Batak": range(7104, 7167 + 1),
- "Lepcha": range(7168, 7247 + 1),
- "Ol Chiki": range(7248, 7295 + 1),
- "Cyrillic Extended C": range(7296, 7311 + 1),
- "Sundanese Supplement": range(7360, 7375 + 1),
- "Vedic Extensions": range(7376, 7423 + 1),
- "Phonetic Extensions": range(7424, 7551 + 1),
- "Phonetic Extensions Supplement": range(7552, 7615 + 1),
- "Combining Diacritical Marks Supplement": range(7616, 7679 + 1),
- "Latin Extended Additional": range(7680, 7935 + 1),
- "Greek Extended": range(7936, 8191 + 1),
- "General Punctuation": range(8192, 8303 + 1),
- "Superscripts and Subscripts": range(8304, 8351 + 1),
- "Currency Symbols": range(8352, 8399 + 1),
- "Combining Diacritical Marks for Symbols": range(8400, 8447 + 1),
- "Letterlike Symbols": range(8448, 8527 + 1),
- "Number Forms": range(8528, 8591 + 1),
- "Arrows": range(8592, 8703 + 1),
- "Mathematical Operators": range(8704, 8959 + 1),
- "Miscellaneous Technical": range(8960, 9215 + 1),
- "Control Pictures": range(9216, 9279 + 1),
- "Optical Character Recognition": range(9280, 9311 + 1),
- "Enclosed Alphanumerics": range(9312, 9471 + 1),
- "Box Drawing": range(9472, 9599 + 1),
- "Block Elements": range(9600, 9631 + 1),
- "Geometric Shapes": range(9632, 9727 + 1),
- "Miscellaneous Symbols": range(9728, 9983 + 1),
- "Dingbats": range(9984, 10175 + 1),
- "Miscellaneous Mathematical Symbols-A": range(10176, 10223 + 1),
- "Supplemental Arrows-A": range(10224, 10239 + 1),
- "Braille Patterns": range(10240, 10495 + 1),
- "Supplemental Arrows-B": range(10496, 10623 + 1),
- "Miscellaneous Mathematical Symbols-B": range(10624, 10751 + 1),
- "Supplemental Mathematical Operators": range(10752, 11007 + 1),
- "Miscellaneous Symbols and Arrows": range(11008, 11263 + 1),
- "Glagolitic": range(11264, 11359 + 1),
- "Latin Extended-C": range(11360, 11391 + 1),
- "Coptic": range(11392, 11519 + 1),
- "Georgian Supplement": range(11520, 11567 + 1),
- "Tifinagh": range(11568, 11647 + 1),
- "Ethiopic Extended": range(11648, 11743 + 1),
- "Cyrillic Extended-A": range(11744, 11775 + 1),
- "Supplemental Punctuation": range(11776, 11903 + 1),
- "CJK Radicals Supplement": range(11904, 12031 + 1),
- "Kangxi Radicals": range(12032, 12255 + 1),
- "Ideographic Description Characters": range(12272, 12287 + 1),
- "CJK Symbols and Punctuation": range(12288, 12351 + 1),
- "Hiragana": range(12352, 12447 + 1),
- "Katakana": range(12448, 12543 + 1),
- "Bopomofo": range(12544, 12591 + 1),
- "Hangul Compatibility Jamo": range(12592, 12687 + 1),
- "Kanbun": range(12688, 12703 + 1),
- "Bopomofo Extended": range(12704, 12735 + 1),
- "CJK Strokes": range(12736, 12783 + 1),
- "Katakana Phonetic Extensions": range(12784, 12799 + 1),
- "Enclosed CJK Letters and Months": range(12800, 13055 + 1),
- "CJK Compatibility": range(13056, 13311 + 1),
- "CJK Unified Ideographs Extension A": range(13312, 19903 + 1),
- "Yijing Hexagram Symbols": range(19904, 19967 + 1),
- "CJK Unified Ideographs": range(19968, 40959 + 1),
- "Yi Syllables": range(40960, 42127 + 1),
- "Yi Radicals": range(42128, 42191 + 1),
- "Lisu": range(42192, 42239 + 1),
- "Vai": range(42240, 42559 + 1),
- "Cyrillic Extended-B": range(42560, 42655 + 1),
- "Bamum": range(42656, 42751 + 1),
- "Modifier Tone Letters": range(42752, 42783 + 1),
- "Latin Extended-D": range(42784, 43007 + 1),
- "Syloti Nagri": range(43008, 43055 + 1),
- "Common Indic Number Forms": range(43056, 43071 + 1),
- "Phags-pa": range(43072, 43135 + 1),
- "Saurashtra": range(43136, 43231 + 1),
- "Devanagari Extended": range(43232, 43263 + 1),
- "Kayah Li": range(43264, 43311 + 1),
- "Rejang": range(43312, 43359 + 1),
- "Hangul Jamo Extended-A": range(43360, 43391 + 1),
- "Javanese": range(43392, 43487 + 1),
- "Myanmar Extended-B": range(43488, 43519 + 1),
- "Cham": range(43520, 43615 + 1),
- "Myanmar Extended-A": range(43616, 43647 + 1),
- "Tai Viet": range(43648, 43743 + 1),
- "Meetei Mayek Extensions": range(43744, 43775 + 1),
- "Ethiopic Extended-A": range(43776, 43823 + 1),
- "Latin Extended-E": range(43824, 43887 + 1),
- "Cherokee Supplement": range(43888, 43967 + 1),
- "Meetei Mayek": range(43968, 44031 + 1),
- "Hangul Syllables": range(44032, 55215 + 1),
- "Hangul Jamo Extended-B": range(55216, 55295 + 1),
- "High Surrogates": range(55296, 56191 + 1),
- "High Private Use Surrogates": range(56192, 56319 + 1),
- "Low Surrogates": range(56320, 57343 + 1),
- "Private Use Area": range(57344, 63743 + 1),
- "CJK Compatibility Ideographs": range(63744, 64255 + 1),
- "Alphabetic Presentation Forms": range(64256, 64335 + 1),
- "Arabic Presentation Forms-A": range(64336, 65023 + 1),
- "Variation Selectors": range(65024, 65039 + 1),
- "Vertical Forms": range(65040, 65055 + 1),
- "Combining Half Marks": range(65056, 65071 + 1),
- "CJK Compatibility Forms": range(65072, 65103 + 1),
- "Small Form Variants": range(65104, 65135 + 1),
- "Arabic Presentation Forms-B": range(65136, 65279 + 1),
- "Halfwidth and Fullwidth Forms": range(65280, 65519 + 1),
- "Specials": range(65520, 65535 + 1),
- "Linear B Syllabary": range(65536, 65663 + 1),
- "Linear B Ideograms": range(65664, 65791 + 1),
- "Aegean Numbers": range(65792, 65855 + 1),
- "Ancient Greek Numbers": range(65856, 65935 + 1),
- "Ancient Symbols": range(65936, 65999 + 1),
- "Phaistos Disc": range(66000, 66047 + 1),
- "Lycian": range(66176, 66207 + 1),
- "Carian": range(66208, 66271 + 1),
- "Coptic Epact Numbers": range(66272, 66303 + 1),
- "Old Italic": range(66304, 66351 + 1),
- "Gothic": range(66352, 66383 + 1),
- "Old Permic": range(66384, 66431 + 1),
- "Ugaritic": range(66432, 66463 + 1),
- "Old Persian": range(66464, 66527 + 1),
- "Deseret": range(66560, 66639 + 1),
- "Shavian": range(66640, 66687 + 1),
- "Osmanya": range(66688, 66735 + 1),
- "Osage": range(66736, 66815 + 1),
- "Elbasan": range(66816, 66863 + 1),
- "Caucasian Albanian": range(66864, 66927 + 1),
- "Linear A": range(67072, 67455 + 1),
- "Cypriot Syllabary": range(67584, 67647 + 1),
- "Imperial Aramaic": range(67648, 67679 + 1),
- "Palmyrene": range(67680, 67711 + 1),
- "Nabataean": range(67712, 67759 + 1),
- "Hatran": range(67808, 67839 + 1),
- "Phoenician": range(67840, 67871 + 1),
- "Lydian": range(67872, 67903 + 1),
- "Meroitic Hieroglyphs": range(67968, 67999 + 1),
- "Meroitic Cursive": range(68000, 68095 + 1),
- "Kharoshthi": range(68096, 68191 + 1),
- "Old South Arabian": range(68192, 68223 + 1),
- "Old North Arabian": range(68224, 68255 + 1),
- "Manichaean": range(68288, 68351 + 1),
- "Avestan": range(68352, 68415 + 1),
- "Inscriptional Parthian": range(68416, 68447 + 1),
- "Inscriptional Pahlavi": range(68448, 68479 + 1),
- "Psalter Pahlavi": range(68480, 68527 + 1),
- "Old Turkic": range(68608, 68687 + 1),
- "Old Hungarian": range(68736, 68863 + 1),
- "Rumi Numeral Symbols": range(69216, 69247 + 1),
- "Brahmi": range(69632, 69759 + 1),
- "Kaithi": range(69760, 69839 + 1),
- "Sora Sompeng": range(69840, 69887 + 1),
- "Chakma": range(69888, 69967 + 1),
- "Mahajani": range(69968, 70015 + 1),
- "Sharada": range(70016, 70111 + 1),
- "Sinhala Archaic Numbers": range(70112, 70143 + 1),
- "Khojki": range(70144, 70223 + 1),
- "Multani": range(70272, 70319 + 1),
- "Khudawadi": range(70320, 70399 + 1),
- "Grantha": range(70400, 70527 + 1),
- "Newa": range(70656, 70783 + 1),
- "Tirhuta": range(70784, 70879 + 1),
- "Siddham": range(71040, 71167 + 1),
- "Modi": range(71168, 71263 + 1),
- "Mongolian Supplement": range(71264, 71295 + 1),
- "Takri": range(71296, 71375 + 1),
- "Ahom": range(71424, 71487 + 1),
- "Warang Citi": range(71840, 71935 + 1),
- "Zanabazar Square": range(72192, 72271 + 1),
- "Soyombo": range(72272, 72367 + 1),
- "Pau Cin Hau": range(72384, 72447 + 1),
- "Bhaiksuki": range(72704, 72815 + 1),
- "Marchen": range(72816, 72895 + 1),
- "Masaram Gondi": range(72960, 73055 + 1),
- "Cuneiform": range(73728, 74751 + 1),
- "Cuneiform Numbers and Punctuation": range(74752, 74879 + 1),
- "Early Dynastic Cuneiform": range(74880, 75087 + 1),
- "Egyptian Hieroglyphs": range(77824, 78895 + 1),
- "Anatolian Hieroglyphs": range(82944, 83583 + 1),
- "Bamum Supplement": range(92160, 92735 + 1),
- "Mro": range(92736, 92783 + 1),
- "Bassa Vah": range(92880, 92927 + 1),
- "Pahawh Hmong": range(92928, 93071 + 1),
- "Miao": range(93952, 94111 + 1),
- "Ideographic Symbols and Punctuation": range(94176, 94207 + 1),
- "Tangut": range(94208, 100351 + 1),
- "Tangut Components": range(100352, 101119 + 1),
- "Kana Supplement": range(110592, 110847 + 1),
- "Kana Extended-A": range(110848, 110895 + 1),
- "Nushu": range(110960, 111359 + 1),
- "Duployan": range(113664, 113823 + 1),
- "Shorthand Format Controls": range(113824, 113839 + 1),
- "Byzantine Musical Symbols": range(118784, 119039 + 1),
- "Musical Symbols": range(119040, 119295 + 1),
- "Ancient Greek Musical Notation": range(119296, 119375 + 1),
- "Tai Xuan Jing Symbols": range(119552, 119647 + 1),
- "Counting Rod Numerals": range(119648, 119679 + 1),
- "Mathematical Alphanumeric Symbols": range(119808, 120831 + 1),
- "Sutton SignWriting": range(120832, 121519 + 1),
- "Glagolitic Supplement": range(122880, 122927 + 1),
- "Mende Kikakui": range(124928, 125151 + 1),
- "Adlam": range(125184, 125279 + 1),
- "Arabic Mathematical Alphabetic Symbols": range(126464, 126719 + 1),
- "Mahjong Tiles": range(126976, 127023 + 1),
- "Domino Tiles": range(127024, 127135 + 1),
- "Playing Cards": range(127136, 127231 + 1),
- "Enclosed Alphanumeric Supplement": range(127232, 127487 + 1),
- "Enclosed Ideographic Supplement": range(127488, 127743 + 1),
- "Miscellaneous Symbols and Pictographs": range(127744, 128511 + 1),
- "Emoticons range(Emoji)": range(128512, 128591 + 1),
- "Ornamental Dingbats": range(128592, 128639 + 1),
- "Transport and Map Symbols": range(128640, 128767 + 1),
- "Alchemical Symbols": range(128768, 128895 + 1),
- "Geometric Shapes Extended": range(128896, 129023 + 1),
- "Supplemental Arrows-C": range(129024, 129279 + 1),
- "Supplemental Symbols and Pictographs": range(129280, 129535 + 1),
- "CJK Unified Ideographs Extension B": range(131072, 173791 + 1),
- "CJK Unified Ideographs Extension C": range(173824, 177983 + 1),
- "CJK Unified Ideographs Extension D": range(177984, 178207 + 1),
- "CJK Unified Ideographs Extension E": range(178208, 183983 + 1),
- "CJK Unified Ideographs Extension F": range(183984, 191471 + 1),
- "CJK Compatibility Ideographs Supplement": range(194560, 195103 + 1),
- "Tags": range(917504, 917631 + 1),
- "Variation Selectors Supplement": range(917760, 917999 + 1),
-} # type: Dict[str, range]
-
-
-UNICODE_SECONDARY_RANGE_KEYWORD = [
- "Supplement",
- "Extended",
- "Extensions",
- "Modifier",
- "Marks",
- "Punctuation",
- "Symbols",
- "Forms",
- "Operators",
- "Miscellaneous",
- "Drawing",
- "Block",
- "Shapes",
- "Supplemental",
- "Tags",
-] # type: List[str]
-
-RE_POSSIBLE_ENCODING_INDICATION = re_compile(
- r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)",
- IGNORECASE,
-)
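(For illustration, a sketch of what this pattern captures; the sample string is invented. It pulls the identifier that follows an encoding/charset/coding marker, whether in an HTML meta tag or a Python coding header.)

    from re import IGNORECASE, compile as re_compile

    RE = re_compile(
        r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})"
        r"(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)",
        IGNORECASE,
    )
    RE.findall('<meta charset="utf-8"> # -*- coding: latin-1 -*-')
    # -> ['utf-8', 'latin-1']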
-
-IANA_SUPPORTED = sorted(
- filter(
- lambda x: x.endswith("_codec") is False
- and x not in {"rot_13", "tactis", "mbcs"},
- list(set(aliases.values())),
- )
-) # type: List[str]
-
-IANA_SUPPORTED_COUNT = len(IANA_SUPPORTED) # type: int
-
-# Pre-computed code pages that are similar, as measured by the function cp_similarity.
-IANA_SUPPORTED_SIMILAR = {
- "cp037": ["cp1026", "cp1140", "cp273", "cp500"],
- "cp1026": ["cp037", "cp1140", "cp273", "cp500"],
- "cp1125": ["cp866"],
- "cp1140": ["cp037", "cp1026", "cp273", "cp500"],
- "cp1250": ["iso8859_2"],
- "cp1251": ["kz1048", "ptcp154"],
- "cp1252": ["iso8859_15", "iso8859_9", "latin_1"],
- "cp1253": ["iso8859_7"],
- "cp1254": ["iso8859_15", "iso8859_9", "latin_1"],
- "cp1257": ["iso8859_13"],
- "cp273": ["cp037", "cp1026", "cp1140", "cp500"],
- "cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"],
- "cp500": ["cp037", "cp1026", "cp1140", "cp273"],
- "cp850": ["cp437", "cp857", "cp858", "cp865"],
- "cp857": ["cp850", "cp858", "cp865"],
- "cp858": ["cp437", "cp850", "cp857", "cp865"],
- "cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"],
- "cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"],
- "cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"],
- "cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"],
- "cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"],
- "cp866": ["cp1125"],
- "iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"],
- "iso8859_11": ["tis_620"],
- "iso8859_13": ["cp1257"],
- "iso8859_14": [
- "iso8859_10",
- "iso8859_15",
- "iso8859_16",
- "iso8859_3",
- "iso8859_9",
- "latin_1",
- ],
- "iso8859_15": [
- "cp1252",
- "cp1254",
- "iso8859_10",
- "iso8859_14",
- "iso8859_16",
- "iso8859_3",
- "iso8859_9",
- "latin_1",
- ],
- "iso8859_16": [
- "iso8859_14",
- "iso8859_15",
- "iso8859_2",
- "iso8859_3",
- "iso8859_9",
- "latin_1",
- ],
- "iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"],
- "iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"],
- "iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"],
- "iso8859_7": ["cp1253"],
- "iso8859_9": [
- "cp1252",
- "cp1254",
- "cp1258",
- "iso8859_10",
- "iso8859_14",
- "iso8859_15",
- "iso8859_16",
- "iso8859_3",
- "iso8859_4",
- "latin_1",
- ],
- "kz1048": ["cp1251", "ptcp154"],
- "latin_1": [
- "cp1252",
- "cp1254",
- "cp1258",
- "iso8859_10",
- "iso8859_14",
- "iso8859_15",
- "iso8859_16",
- "iso8859_3",
- "iso8859_4",
- "iso8859_9",
- ],
- "mac_iceland": ["mac_roman", "mac_turkish"],
- "mac_roman": ["mac_iceland", "mac_turkish"],
- "mac_turkish": ["mac_iceland", "mac_roman"],
- "ptcp154": ["cp1251", "kz1048"],
- "tis_620": ["iso8859_11"],
-} # type: Dict[str, List[str]]
-
-
-CHARDET_CORRESPONDENCE = {
- "iso2022_kr": "ISO-2022-KR",
- "iso2022_jp": "ISO-2022-JP",
- "euc_kr": "EUC-KR",
- "tis_620": "TIS-620",
- "utf_32": "UTF-32",
- "euc_jp": "EUC-JP",
- "koi8_r": "KOI8-R",
- "iso8859_1": "ISO-8859-1",
- "iso8859_2": "ISO-8859-2",
- "iso8859_5": "ISO-8859-5",
- "iso8859_6": "ISO-8859-6",
- "iso8859_7": "ISO-8859-7",
- "iso8859_8": "ISO-8859-8",
- "utf_16": "UTF-16",
- "cp855": "IBM855",
- "mac_cyrillic": "MacCyrillic",
- "gb2312": "GB2312",
- "gb18030": "GB18030",
- "cp932": "CP932",
- "cp866": "IBM866",
- "utf_8": "utf-8",
- "utf_8_sig": "UTF-8-SIG",
- "shift_jis": "SHIFT_JIS",
- "big5": "Big5",
- "cp1250": "windows-1250",
- "cp1251": "windows-1251",
- "cp1252": "Windows-1252",
- "cp1253": "windows-1253",
- "cp1255": "windows-1255",
- "cp1256": "windows-1256",
- "cp1254": "Windows-1254",
- "cp949": "CP949",
-} # type: Dict[str, str]
-
-
-COMMON_SAFE_ASCII_CHARACTERS = {
- "<",
- ">",
- "=",
- ":",
- "/",
- "&",
- ";",
- "{",
- "}",
- "[",
- "]",
- ",",
- "|",
- '"',
- "-",
-} # type: Set[str]
-
-
-KO_NAMES = {"johab", "cp949", "euc_kr"} # type: Set[str]
-ZH_NAMES = {"big5", "cp950", "big5hkscs", "hz"} # type: Set[str]
-
-NOT_PRINTABLE_PATTERN = re_compile(r"[0-9\W\n\r\t]+")
-
-LANGUAGE_SUPPORTED_COUNT = len(FREQUENCIES) # type: int
-
-# Logging LEVEL below DEBUG
-TRACE = 5 # type: int
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/legacy.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/legacy.py
deleted file mode 100644
index cdebe2b8..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/legacy.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import warnings
-from typing import Dict, Optional, Union
-
-from .api import from_bytes, from_fp, from_path, normalize
-from .constant import CHARDET_CORRESPONDENCE
-from .models import CharsetMatch, CharsetMatches
-
-
-def detect(byte_str: bytes) -> Dict[str, Optional[Union[str, float]]]:
- """
- chardet legacy method
- Detect the encoding of the given byte string. It should be mostly backward-compatible.
-    Encoding names will match Chardet's own spelling whenever possible (not for encodings Chardet does not support).
-    This function is deprecated; it exists only to ease migration away from chardet. Consult the documentation for
-    further information. Not planned for removal.
-
- :param byte_str: The byte sequence to examine.
- """
- if not isinstance(byte_str, (bytearray, bytes)):
- raise TypeError( # pragma: nocover
- "Expected object of type bytes or bytearray, got: "
- "{0}".format(type(byte_str))
- )
-
- if isinstance(byte_str, bytearray):
- byte_str = bytes(byte_str)
-
- r = from_bytes(byte_str).best()
-
- encoding = r.encoding if r is not None else None
- language = r.language if r is not None and r.language != "Unknown" else ""
- confidence = 1.0 - r.chaos if r is not None else None
-
-    # Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig gets stripped during the detection/normalization process
- # but chardet does return 'utf-8-sig' and it is a valid codec name.
- if r is not None and encoding == "utf_8" and r.bom:
- encoding += "_sig"
-
- return {
- "encoding": encoding
- if encoding not in CHARDET_CORRESPONDENCE
- else CHARDET_CORRESPONDENCE[encoding],
- "language": language,
- "confidence": confidence,
- }
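(A minimal usage sketch of this shim; the input string and the returned values below are illustrative, not guaranteed.)

    from charset_normalizer.legacy import detect

    result = detect("Comment ça va ?".encode("cp1252"))
    # A chardet-shaped dict, e.g.:
    # {'encoding': 'Windows-1252', 'language': 'French', 'confidence': 0.97}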
-
-
-class CharsetNormalizerMatch(CharsetMatch):
- pass
-
-
-class CharsetNormalizerMatches(CharsetMatches):
- @staticmethod
- def from_fp(*args, **kwargs): # type: ignore
- warnings.warn( # pragma: nocover
- "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
- "and scheduled to be removed in 3.0",
- DeprecationWarning,
- )
- return from_fp(*args, **kwargs) # pragma: nocover
-
- @staticmethod
- def from_bytes(*args, **kwargs): # type: ignore
- warnings.warn( # pragma: nocover
- "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
- "and scheduled to be removed in 3.0",
- DeprecationWarning,
- )
- return from_bytes(*args, **kwargs) # pragma: nocover
-
- @staticmethod
- def from_path(*args, **kwargs): # type: ignore
- warnings.warn( # pragma: nocover
- "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
- "and scheduled to be removed in 3.0",
- DeprecationWarning,
- )
- return from_path(*args, **kwargs) # pragma: nocover
-
- @staticmethod
- def normalize(*args, **kwargs): # type: ignore
- warnings.warn( # pragma: nocover
- "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
- "and scheduled to be removed in 3.0",
- DeprecationWarning,
- )
- return normalize(*args, **kwargs) # pragma: nocover
-
-
-class CharsetDetector(CharsetNormalizerMatches):
- pass
-
-
-class CharsetDoctor(CharsetNormalizerMatches):
- pass
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/md.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/md.py
deleted file mode 100644
index f3d6505c..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/md.py
+++ /dev/null
@@ -1,559 +0,0 @@
-from functools import lru_cache
-from typing import List, Optional
-
-from .constant import COMMON_SAFE_ASCII_CHARACTERS, UNICODE_SECONDARY_RANGE_KEYWORD
-from .utils import (
- is_accentuated,
- is_ascii,
- is_case_variable,
- is_cjk,
- is_emoticon,
- is_hangul,
- is_hiragana,
- is_katakana,
- is_latin,
- is_punctuation,
- is_separator,
- is_symbol,
- is_thai,
- remove_accent,
- unicode_range,
-)
-
-
-class MessDetectorPlugin:
- """
- Base abstract class used for mess detection plugins.
-    All detectors MUST extend and implement the given methods.
- """
-
- def eligible(self, character: str) -> bool:
- """
-        Determine if the given character should be fed to this detector.
- """
- raise NotImplementedError # pragma: nocover
-
- def feed(self, character: str) -> None:
- """
- The main routine to be executed upon character.
-        Insert the logic by which the text would be considered chaotic.
- """
- raise NotImplementedError # pragma: nocover
-
- def reset(self) -> None: # pragma: no cover
- """
- Permit to reset the plugin to the initial state.
- """
- raise NotImplementedError
-
- @property
- def ratio(self) -> float:
- """
- Compute the chaos ratio based on what your feed() has seen.
-        Must NOT be lower than 0.0; there is no upper bound.
- """
- raise NotImplementedError # pragma: nocover
-
-
-class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
- def __init__(self) -> None:
- self._punctuation_count = 0 # type: int
- self._symbol_count = 0 # type: int
- self._character_count = 0 # type: int
-
- self._last_printable_char = None # type: Optional[str]
- self._frenzy_symbol_in_word = False # type: bool
-
- def eligible(self, character: str) -> bool:
- return character.isprintable()
-
- def feed(self, character: str) -> None:
- self._character_count += 1
-
- if (
- character != self._last_printable_char
- and character not in COMMON_SAFE_ASCII_CHARACTERS
- ):
- if is_punctuation(character):
- self._punctuation_count += 1
- elif (
- character.isdigit() is False
- and is_symbol(character)
- and is_emoticon(character) is False
- ):
- self._symbol_count += 2
-
- self._last_printable_char = character
-
- def reset(self) -> None: # pragma: no cover
- self._punctuation_count = 0
- self._character_count = 0
- self._symbol_count = 0
-
- @property
- def ratio(self) -> float:
- if self._character_count == 0:
- return 0.0
-
- ratio_of_punctuation = (
- self._punctuation_count + self._symbol_count
- ) / self._character_count # type: float
-
- return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
-
-
-class TooManyAccentuatedPlugin(MessDetectorPlugin):
- def __init__(self) -> None:
- self._character_count = 0 # type: int
- self._accentuated_count = 0 # type: int
-
- def eligible(self, character: str) -> bool:
- return character.isalpha()
-
- def feed(self, character: str) -> None:
- self._character_count += 1
-
- if is_accentuated(character):
- self._accentuated_count += 1
-
- def reset(self) -> None: # pragma: no cover
- self._character_count = 0
- self._accentuated_count = 0
-
- @property
- def ratio(self) -> float:
- if self._character_count == 0:
- return 0.0
- ratio_of_accentuation = (
- self._accentuated_count / self._character_count
- ) # type: float
- return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
-
-
-class UnprintablePlugin(MessDetectorPlugin):
- def __init__(self) -> None:
- self._unprintable_count = 0 # type: int
- self._character_count = 0 # type: int
-
- def eligible(self, character: str) -> bool:
- return True
-
- def feed(self, character: str) -> None:
- if (
- character.isspace() is False # includes \n \t \r \v
- and character.isprintable() is False
-            and character != "\x1A"  # Why? It's the ASCII substitute character.
- ):
- self._unprintable_count += 1
- self._character_count += 1
-
- def reset(self) -> None: # pragma: no cover
- self._unprintable_count = 0
-
- @property
- def ratio(self) -> float:
- if self._character_count == 0:
- return 0.0
-
- return (self._unprintable_count * 8) / self._character_count
-
-
-class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
- def __init__(self) -> None:
- self._successive_count = 0 # type: int
- self._character_count = 0 # type: int
-
- self._last_latin_character = None # type: Optional[str]
-
- def eligible(self, character: str) -> bool:
- return character.isalpha() and is_latin(character)
-
- def feed(self, character: str) -> None:
- self._character_count += 1
- if (
- self._last_latin_character is not None
- and is_accentuated(character)
- and is_accentuated(self._last_latin_character)
- ):
- if character.isupper() and self._last_latin_character.isupper():
- self._successive_count += 1
-            # Worse if it's the same char duplicated with a different accent.
- if remove_accent(character) == remove_accent(self._last_latin_character):
- self._successive_count += 1
- self._last_latin_character = character
-
- def reset(self) -> None: # pragma: no cover
- self._successive_count = 0
- self._character_count = 0
- self._last_latin_character = None
-
- @property
- def ratio(self) -> float:
- if self._character_count == 0:
- return 0.0
-
- return (self._successive_count * 2) / self._character_count
-
-
-class SuspiciousRange(MessDetectorPlugin):
- def __init__(self) -> None:
- self._suspicious_successive_range_count = 0 # type: int
- self._character_count = 0 # type: int
- self._last_printable_seen = None # type: Optional[str]
-
- def eligible(self, character: str) -> bool:
- return character.isprintable()
-
- def feed(self, character: str) -> None:
- self._character_count += 1
-
- if (
- character.isspace()
- or is_punctuation(character)
- or character in COMMON_SAFE_ASCII_CHARACTERS
- ):
- self._last_printable_seen = None
- return
-
- if self._last_printable_seen is None:
- self._last_printable_seen = character
- return
-
- unicode_range_a = unicode_range(
- self._last_printable_seen
- ) # type: Optional[str]
- unicode_range_b = unicode_range(character) # type: Optional[str]
-
- if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
- self._suspicious_successive_range_count += 1
-
- self._last_printable_seen = character
-
- def reset(self) -> None: # pragma: no cover
- self._character_count = 0
- self._suspicious_successive_range_count = 0
- self._last_printable_seen = None
-
- @property
- def ratio(self) -> float:
- if self._character_count == 0:
- return 0.0
-
- ratio_of_suspicious_range_usage = (
- self._suspicious_successive_range_count * 2
- ) / self._character_count # type: float
-
- if ratio_of_suspicious_range_usage < 0.1:
- return 0.0
-
- return ratio_of_suspicious_range_usage
-
-
-class SuperWeirdWordPlugin(MessDetectorPlugin):
- def __init__(self) -> None:
- self._word_count = 0 # type: int
- self._bad_word_count = 0 # type: int
- self._foreign_long_count = 0 # type: int
-
- self._is_current_word_bad = False # type: bool
- self._foreign_long_watch = False # type: bool
-
- self._character_count = 0 # type: int
- self._bad_character_count = 0 # type: int
-
- self._buffer = "" # type: str
- self._buffer_accent_count = 0 # type: int
-
- def eligible(self, character: str) -> bool:
- return True
-
- def feed(self, character: str) -> None:
- if character.isalpha():
- self._buffer = "".join([self._buffer, character])
- if is_accentuated(character):
- self._buffer_accent_count += 1
- if (
- self._foreign_long_watch is False
- and (is_latin(character) is False or is_accentuated(character))
- and is_cjk(character) is False
- and is_hangul(character) is False
- and is_katakana(character) is False
- and is_hiragana(character) is False
- and is_thai(character) is False
- ):
- self._foreign_long_watch = True
- return
- if not self._buffer:
- return
- if (
- character.isspace() or is_punctuation(character) or is_separator(character)
- ) and self._buffer:
- self._word_count += 1
- buffer_length = len(self._buffer) # type: int
-
- self._character_count += buffer_length
-
- if buffer_length >= 4:
- if self._buffer_accent_count / buffer_length > 0.34:
- self._is_current_word_bad = True
-                # Words/buffers ending with an upper-case accentuated letter are so rare
-                # that we consider them all suspicious. Same weight as the foreign_long case.
- if is_accentuated(self._buffer[-1]) and self._buffer[-1].isupper():
- self._foreign_long_count += 1
- self._is_current_word_bad = True
- if buffer_length >= 24 and self._foreign_long_watch:
- self._foreign_long_count += 1
- self._is_current_word_bad = True
-
- if self._is_current_word_bad:
- self._bad_word_count += 1
- self._bad_character_count += len(self._buffer)
- self._is_current_word_bad = False
-
- self._foreign_long_watch = False
- self._buffer = ""
- self._buffer_accent_count = 0
- elif (
- character not in {"<", ">", "-", "=", "~", "|", "_"}
- and character.isdigit() is False
- and is_symbol(character)
- ):
- self._is_current_word_bad = True
- self._buffer += character
-
- def reset(self) -> None: # pragma: no cover
- self._buffer = ""
- self._is_current_word_bad = False
- self._foreign_long_watch = False
- self._bad_word_count = 0
- self._word_count = 0
- self._character_count = 0
- self._bad_character_count = 0
- self._foreign_long_count = 0
-
- @property
- def ratio(self) -> float:
- if self._word_count <= 10 and self._foreign_long_count == 0:
- return 0.0
-
- return self._bad_character_count / self._character_count
-
-
-class CjkInvalidStopPlugin(MessDetectorPlugin):
- """
-    GB (Chinese) based encodings often render the full stop incorrectly when the content does not fit,
-    which is easy to detect: search for the overuse of '丅' and '丄'.
- """
-
- def __init__(self) -> None:
- self._wrong_stop_count = 0 # type: int
- self._cjk_character_count = 0 # type: int
-
- def eligible(self, character: str) -> bool:
- return True
-
- def feed(self, character: str) -> None:
- if character in {"丅", "丄"}:
- self._wrong_stop_count += 1
- return
- if is_cjk(character):
- self._cjk_character_count += 1
-
- def reset(self) -> None: # pragma: no cover
- self._wrong_stop_count = 0
- self._cjk_character_count = 0
-
- @property
- def ratio(self) -> float:
- if self._cjk_character_count < 16:
- return 0.0
- return self._wrong_stop_count / self._cjk_character_count
-
-
-class ArchaicUpperLowerPlugin(MessDetectorPlugin):
- def __init__(self) -> None:
- self._buf = False # type: bool
-
- self._character_count_since_last_sep = 0 # type: int
-
- self._successive_upper_lower_count = 0 # type: int
- self._successive_upper_lower_count_final = 0 # type: int
-
- self._character_count = 0 # type: int
-
- self._last_alpha_seen = None # type: Optional[str]
- self._current_ascii_only = True # type: bool
-
- def eligible(self, character: str) -> bool:
- return True
-
- def feed(self, character: str) -> None:
- is_concerned = character.isalpha() and is_case_variable(character)
- chunk_sep = is_concerned is False
-
- if chunk_sep and self._character_count_since_last_sep > 0:
- if (
- self._character_count_since_last_sep <= 64
- and character.isdigit() is False
- and self._current_ascii_only is False
- ):
- self._successive_upper_lower_count_final += (
- self._successive_upper_lower_count
- )
-
- self._successive_upper_lower_count = 0
- self._character_count_since_last_sep = 0
- self._last_alpha_seen = None
- self._buf = False
- self._character_count += 1
- self._current_ascii_only = True
-
- return
-
- if self._current_ascii_only is True and is_ascii(character) is False:
- self._current_ascii_only = False
-
- if self._last_alpha_seen is not None:
- if (character.isupper() and self._last_alpha_seen.islower()) or (
- character.islower() and self._last_alpha_seen.isupper()
- ):
- if self._buf is True:
- self._successive_upper_lower_count += 2
- self._buf = False
- else:
- self._buf = True
- else:
- self._buf = False
-
- self._character_count += 1
- self._character_count_since_last_sep += 1
- self._last_alpha_seen = character
-
- def reset(self) -> None: # pragma: no cover
- self._character_count = 0
- self._character_count_since_last_sep = 0
- self._successive_upper_lower_count = 0
- self._successive_upper_lower_count_final = 0
- self._last_alpha_seen = None
- self._buf = False
- self._current_ascii_only = True
-
- @property
- def ratio(self) -> float:
- if self._character_count == 0:
- return 0.0
-
- return self._successive_upper_lower_count_final / self._character_count
-
-
-def is_suspiciously_successive_range(
- unicode_range_a: Optional[str], unicode_range_b: Optional[str]
-) -> bool:
- """
-    Determine if two Unicode ranges seen next to each other can be considered suspicious.
- """
- if unicode_range_a is None or unicode_range_b is None:
- return True
-
- if unicode_range_a == unicode_range_b:
- return False
-
- if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
- return False
-
- if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
- return False
-
-    # Latin characters can be accompanied by a combining diacritical mark,
-    # e.g. in Vietnamese.
- if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and (
- "Combining" in unicode_range_a or "Combining" in unicode_range_b
- ):
- return False
-
- keywords_range_a, keywords_range_b = unicode_range_a.split(
- " "
- ), unicode_range_b.split(" ")
-
- for el in keywords_range_a:
- if el in UNICODE_SECONDARY_RANGE_KEYWORD:
- continue
- if el in keywords_range_b:
- return False
-
- # Japanese Exception
- range_a_jp_chars, range_b_jp_chars = (
- unicode_range_a
- in (
- "Hiragana",
- "Katakana",
- ),
- unicode_range_b in ("Hiragana", "Katakana"),
- )
- if (range_a_jp_chars or range_b_jp_chars) and (
- "CJK" in unicode_range_a or "CJK" in unicode_range_b
- ):
- return False
- if range_a_jp_chars and range_b_jp_chars:
- return False
-
- if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
- if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
- return False
- if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
- return False
-
-    # Chinese/Japanese use dedicated ranges for punctuation and/or separators.
- if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or (
- unicode_range_a in ["Katakana", "Hiragana"]
- and unicode_range_b in ["Katakana", "Hiragana"]
- ):
- if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b:
- return False
- if "Forms" in unicode_range_a or "Forms" in unicode_range_b:
- return False
-
- return True
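(A few illustrative calls, following the rules above.)

    from charset_normalizer.md import is_suspiciously_successive_range

    is_suspiciously_successive_range("Basic Latin", "Cyrillic")            # True
    is_suspiciously_successive_range("Hiragana", "Katakana")               # False (Japanese exception)
    is_suspiciously_successive_range("Basic Latin", "Latin-1 Supplement")  # False (both Latin)
    is_suspiciously_successive_range(None, "Basic Latin")                  # True (unknown range)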
-
-
-@lru_cache(maxsize=2048)
-def mess_ratio(
- decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False
-) -> float:
- """
-    Compute a mess ratio given a decoded byte sequence. Reaching the maximum threshold stops the computation early.
- """
-
- detectors = [
- md_class() for md_class in MessDetectorPlugin.__subclasses__()
- ] # type: List[MessDetectorPlugin]
-
- length = len(decoded_sequence) + 1 # type: int
-
- mean_mess_ratio = 0.0 # type: float
-
- if length < 512:
- intermediary_mean_mess_ratio_calc = 32 # type: int
- elif length <= 1024:
- intermediary_mean_mess_ratio_calc = 64
- else:
- intermediary_mean_mess_ratio_calc = 128
-
- for character, index in zip(decoded_sequence + "\n", range(length)):
- for detector in detectors:
- if detector.eligible(character):
- detector.feed(character)
-
- if (
- index > 0 and index % intermediary_mean_mess_ratio_calc == 0
- ) or index == length - 1:
- mean_mess_ratio = sum(dt.ratio for dt in detectors)
-
- if mean_mess_ratio >= maximum_threshold:
- break
-
- if debug:
- for dt in detectors: # pragma: nocover
- print(dt.__class__, dt.ratio)
-
- return round(mean_mess_ratio, 3)
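(Usage sketch; the exact figures depend on the plugin set and are only indicative.)

    from charset_normalizer.md import mess_ratio

    mess_ratio("This is a perfectly ordinary sentence.")   # ~0.0
    mess_ratio("ÃÂ© Ã¢â‚¬ mojibake ÃÂ¶ÃÂ¤ everywhere")   # well above the default 0.2 threshold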
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/models.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/models.py
deleted file mode 100644
index c38da31f..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/models.py
+++ /dev/null
@@ -1,392 +0,0 @@
-import warnings
-from collections import Counter
-from encodings.aliases import aliases
-from hashlib import sha256
-from json import dumps
-from re import sub
-from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
-
-from .constant import NOT_PRINTABLE_PATTERN, TOO_BIG_SEQUENCE
-from .md import mess_ratio
-from .utils import iana_name, is_multi_byte_encoding, unicode_range
-
-
-class CharsetMatch:
- def __init__(
- self,
- payload: bytes,
- guessed_encoding: str,
- mean_mess_ratio: float,
- has_sig_or_bom: bool,
- languages: "CoherenceMatches",
- decoded_payload: Optional[str] = None,
- ):
- self._payload = payload # type: bytes
-
- self._encoding = guessed_encoding # type: str
- self._mean_mess_ratio = mean_mess_ratio # type: float
- self._languages = languages # type: CoherenceMatches
- self._has_sig_or_bom = has_sig_or_bom # type: bool
- self._unicode_ranges = None # type: Optional[List[str]]
-
- self._leaves = [] # type: List[CharsetMatch]
- self._mean_coherence_ratio = 0.0 # type: float
-
- self._output_payload = None # type: Optional[bytes]
- self._output_encoding = None # type: Optional[str]
-
- self._string = decoded_payload # type: Optional[str]
-
- def __eq__(self, other: object) -> bool:
- if not isinstance(other, CharsetMatch):
- raise TypeError(
- "__eq__ cannot be invoked on {} and {}.".format(
- str(other.__class__), str(self.__class__)
- )
- )
- return self.encoding == other.encoding and self.fingerprint == other.fingerprint
-
- def __lt__(self, other: object) -> bool:
- """
-        Implemented so that sorted() works on CharsetMatch items.
- """
- if not isinstance(other, CharsetMatch):
- raise ValueError
-
- chaos_difference = abs(self.chaos - other.chaos) # type: float
- coherence_difference = abs(self.coherence - other.coherence) # type: float
-
-        # Below 1% difference --> Use Coherence
- if chaos_difference < 0.01 and coherence_difference > 0.02:
-            # For a tough decision, prefer the result that decoded as many multi-byte characters as possible.
- if chaos_difference == 0.0 and self.coherence == other.coherence:
- return self.multi_byte_usage > other.multi_byte_usage
- return self.coherence > other.coherence
-
- return self.chaos < other.chaos
-
- @property
- def multi_byte_usage(self) -> float:
- return 1.0 - len(str(self)) / len(self.raw)
-
- @property
- def chaos_secondary_pass(self) -> float:
- """
-        Check the chaos in the decoded text once more, this time on the full content.
- Use with caution, this can be very slow.
- Notice: Will be removed in 3.0
- """
- warnings.warn(
- "chaos_secondary_pass is deprecated and will be removed in 3.0",
- DeprecationWarning,
- )
- return mess_ratio(str(self), 1.0)
-
- @property
- def coherence_non_latin(self) -> float:
- """
-        Coherence ratio of the first non-Latin language detected, if ANY.
- Notice: Will be removed in 3.0
- """
- warnings.warn(
- "coherence_non_latin is deprecated and will be removed in 3.0",
- DeprecationWarning,
- )
- return 0.0
-
- @property
- def w_counter(self) -> Counter:
- """
- Word counter instance on decoded text.
- Notice: Will be removed in 3.0
- """
- warnings.warn(
- "w_counter is deprecated and will be removed in 3.0", DeprecationWarning
- )
-
- string_printable_only = sub(NOT_PRINTABLE_PATTERN, " ", str(self).lower())
-
- return Counter(string_printable_only.split())
-
- def __str__(self) -> str:
- # Lazy Str Loading
- if self._string is None:
- self._string = str(self._payload, self._encoding, "strict")
- return self._string
-
- def __repr__(self) -> str:
- return "".format(self.encoding, self.fingerprint)
-
- def add_submatch(self, other: "CharsetMatch") -> None:
- if not isinstance(other, CharsetMatch) or other == self:
- raise ValueError(
- "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
- other.__class__
- )
- )
-
- other._string = None # Unload RAM usage; dirty trick.
- self._leaves.append(other)
-
- @property
- def encoding(self) -> str:
- return self._encoding
-
- @property
- def encoding_aliases(self) -> List[str]:
- """
-        Encodings are known by many names; this could help when searching for IBM855 when it's listed as CP855.
- """
- also_known_as = [] # type: List[str]
- for u, p in aliases.items():
- if self.encoding == u:
- also_known_as.append(p)
- elif self.encoding == p:
- also_known_as.append(u)
- return also_known_as
-
- @property
- def bom(self) -> bool:
- return self._has_sig_or_bom
-
- @property
- def byte_order_mark(self) -> bool:
- return self._has_sig_or_bom
-
- @property
- def languages(self) -> List[str]:
- """
- Return the complete list of possible languages found in decoded sequence.
-        Usually not really useful. The returned list may be empty even if the 'language' property returns something != 'Unknown'.
- """
- return [e[0] for e in self._languages]
-
- @property
- def language(self) -> str:
- """
- Most probable language found in decoded sequence. If none were detected or inferred, the property will return
- "Unknown".
- """
- if not self._languages:
- # Trying to infer the language based on the given encoding
-            # It's either English or we should not take a position in certain cases.
- if "ascii" in self.could_be_from_charset:
- return "English"
-
-            # done here to avoid a circular import
- from charset_normalizer.cd import encoding_languages, mb_encoding_languages
-
- languages = (
- mb_encoding_languages(self.encoding)
- if is_multi_byte_encoding(self.encoding)
- else encoding_languages(self.encoding)
- )
-
- if len(languages) == 0 or "Latin Based" in languages:
- return "Unknown"
-
- return languages[0]
-
- return self._languages[0][0]
-
- @property
- def chaos(self) -> float:
- return self._mean_mess_ratio
-
- @property
- def coherence(self) -> float:
- if not self._languages:
- return 0.0
- return self._languages[0][1]
-
- @property
- def percent_chaos(self) -> float:
- return round(self.chaos * 100, ndigits=3)
-
- @property
- def percent_coherence(self) -> float:
- return round(self.coherence * 100, ndigits=3)
-
- @property
- def raw(self) -> bytes:
- """
- Original untouched bytes.
- """
- return self._payload
-
- @property
- def submatch(self) -> List["CharsetMatch"]:
- return self._leaves
-
- @property
- def has_submatch(self) -> bool:
- return len(self._leaves) > 0
-
- @property
- def alphabets(self) -> List[str]:
- if self._unicode_ranges is not None:
- return self._unicode_ranges
- # list detected ranges
- detected_ranges = [
- unicode_range(char) for char in str(self)
- ] # type: List[Optional[str]]
- # filter and sort
- self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
- return self._unicode_ranges
-
- @property
- def could_be_from_charset(self) -> List[str]:
- """
-        The complete list of encodings that output the exact SAME str result and therefore could be the originating
- encoding.
- This list does include the encoding available in property 'encoding'.
- """
- return [self._encoding] + [m.encoding for m in self._leaves]
-
- def first(self) -> "CharsetMatch":
- """
- Kept for BC reasons. Will be removed in 3.0.
- """
- return self
-
- def best(self) -> "CharsetMatch":
- """
- Kept for BC reasons. Will be removed in 3.0.
- """
- return self
-
- def output(self, encoding: str = "utf_8") -> bytes:
- """
-        Method to get the re-encoded bytes payload using the given target encoding. Defaults to UTF-8.
-        Unsupported characters are replaced by the encoder rather than raising an error.
- """
- if self._output_encoding is None or self._output_encoding != encoding:
- self._output_encoding = encoding
- self._output_payload = str(self).encode(encoding, "replace")
-
- return self._output_payload # type: ignore
-
- @property
- def fingerprint(self) -> str:
- """
- Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
- """
- return sha256(self.output()).hexdigest()
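(A sketch of how a single match is typically consumed, assuming charset_normalizer 2.0.x is installed; the guessed name in the comment is illustrative.)

    from charset_normalizer import from_bytes

    match = from_bytes("Überraschung".encode("cp1252")).best()
    if match is not None:
        text = str(match)        # lazily decoded payload
        guess = match.encoding   # guessed codec name, e.g. 'cp1252'
        data = match.output()    # payload re-encoded, UTF-8 by default
        key = match.fingerprint  # SHA-256 hex digest of output()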
-
-
-class CharsetMatches:
- """
-    Container of CharsetMatch items, ordered by default from the most probable to the least.
-    Acts like a list (iterable) but does not implement all related methods.
- """
-
- def __init__(self, results: List[CharsetMatch] = None):
- self._results = sorted(results) if results else [] # type: List[CharsetMatch]
-
- def __iter__(self) -> Iterator[CharsetMatch]:
- yield from self._results
-
- def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
- """
- Retrieve a single item either by its position or encoding name (alias may be used here).
- Raise KeyError upon invalid index or encoding not present in results.
- """
- if isinstance(item, int):
- return self._results[item]
- if isinstance(item, str):
- item = iana_name(item, False)
- for result in self._results:
- if item in result.could_be_from_charset:
- return result
- raise KeyError
-
- def __len__(self) -> int:
- return len(self._results)
-
- def __bool__(self) -> bool:
- return len(self._results) > 0
-
- def append(self, item: CharsetMatch) -> None:
- """
- Insert a single match. Will be inserted accordingly to preserve sort.
- Can be inserted as a submatch.
- """
- if not isinstance(item, CharsetMatch):
- raise ValueError(
- "Cannot append instance '{}' to CharsetMatches".format(
- str(item.__class__)
- )
- )
- # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
- if len(item.raw) <= TOO_BIG_SEQUENCE:
- for match in self._results:
- if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
- match.add_submatch(item)
- return
- self._results.append(item)
- self._results = sorted(self._results)
-
- def best(self) -> Optional["CharsetMatch"]:
- """
- Simply return the first match. Strict equivalent to matches[0].
- """
- if not self._results:
- return None
- return self._results[0]
-
- def first(self) -> Optional["CharsetMatch"]:
- """
-        Redundant method; simply calls best(). Kept for BC reasons.
- """
- return self.best()
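(And the container itself behaves like a sorted, read-mostly list; a sketch with an invented payload.)

    from charset_normalizer import from_bytes

    matches = from_bytes("été".encode("utf_8"))  # a CharsetMatches instance
    len(matches)      # number of plausible encodings
    matches.best()    # most probable CharsetMatch, or None if empty
    matches["utf_8"]  # lookup by codec name or alias; KeyError if absent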
-
-
-CoherenceMatch = Tuple[str, float]
-CoherenceMatches = List[CoherenceMatch]
-
-
-class CliDetectionResult:
- def __init__(
- self,
- path: str,
- encoding: Optional[str],
- encoding_aliases: List[str],
- alternative_encodings: List[str],
- language: str,
- alphabets: List[str],
- has_sig_or_bom: bool,
- chaos: float,
- coherence: float,
- unicode_path: Optional[str],
- is_preferred: bool,
- ):
- self.path = path # type: str
- self.unicode_path = unicode_path # type: Optional[str]
- self.encoding = encoding # type: Optional[str]
- self.encoding_aliases = encoding_aliases # type: List[str]
- self.alternative_encodings = alternative_encodings # type: List[str]
- self.language = language # type: str
- self.alphabets = alphabets # type: List[str]
- self.has_sig_or_bom = has_sig_or_bom # type: bool
- self.chaos = chaos # type: float
- self.coherence = coherence # type: float
- self.is_preferred = is_preferred # type: bool
-
- @property
- def __dict__(self) -> Dict[str, Any]: # type: ignore
- return {
- "path": self.path,
- "encoding": self.encoding,
- "encoding_aliases": self.encoding_aliases,
- "alternative_encodings": self.alternative_encodings,
- "language": self.language,
- "alphabets": self.alphabets,
- "has_sig_or_bom": self.has_sig_or_bom,
- "chaos": self.chaos,
- "coherence": self.coherence,
- "unicode_path": self.unicode_path,
- "is_preferred": self.is_preferred,
- }
-
- def to_json(self) -> str:
- return dumps(self.__dict__, ensure_ascii=True, indent=4)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/py.typed b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/py.typed
deleted file mode 100644
index e69de29b..00000000
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/utils.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/utils.py
deleted file mode 100644
index dcb14dfe..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/utils.py
+++ /dev/null
@@ -1,342 +0,0 @@
-try:
- import unicodedata2 as unicodedata
-except ImportError:
- import unicodedata # type: ignore[no-redef]
-
-import importlib
-import logging
-from codecs import IncrementalDecoder
-from encodings.aliases import aliases
-from functools import lru_cache
-from re import findall
-from typing import List, Optional, Set, Tuple, Union
-
-from _multibytecodec import MultibyteIncrementalDecoder # type: ignore
-
-from .constant import (
- ENCODING_MARKS,
- IANA_SUPPORTED_SIMILAR,
- RE_POSSIBLE_ENCODING_INDICATION,
- UNICODE_RANGES_COMBINED,
- UNICODE_SECONDARY_RANGE_KEYWORD,
- UTF8_MAXIMAL_ALLOCATION,
-)
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_accentuated(character: str) -> bool:
- try:
- description = unicodedata.name(character) # type: str
- except ValueError:
- return False
- return (
- "WITH GRAVE" in description
- or "WITH ACUTE" in description
- or "WITH CEDILLA" in description
- or "WITH DIAERESIS" in description
- or "WITH CIRCUMFLEX" in description
- or "WITH TILDE" in description
- )
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def remove_accent(character: str) -> str:
- decomposed = unicodedata.decomposition(character) # type: str
- if not decomposed:
- return character
-
- codes = decomposed.split(" ") # type: List[str]
-
- return chr(int(codes[0], 16))
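(In effect this keeps only the base character of a canonical decomposition, for example:)

    from charset_normalizer.utils import remove_accent

    remove_accent("é")  # 'e' (decomposition '0065 0301'; first code point kept)
    remove_accent("x")  # 'x' (no decomposition; returned unchanged)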
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def unicode_range(character: str) -> Optional[str]:
- """
- Retrieve the Unicode range official name from a single character.
- """
- character_ord = ord(character) # type: int
-
- for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
- if character_ord in ord_range:
- return range_name
-
- return None
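(For instance, with the range names from UNICODE_RANGES_COMBINED:)

    from charset_normalizer.utils import unicode_range

    unicode_range("a")   # 'Basic Latin'
    unicode_range("é")   # 'Latin-1 Supplement'
    unicode_range("あ")  # 'Hiragana'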
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_latin(character: str) -> bool:
- try:
- description = unicodedata.name(character) # type: str
- except ValueError:
- return False
- return "LATIN" in description
-
-
-def is_ascii(character: str) -> bool:
- try:
- character.encode("ascii")
- except UnicodeEncodeError:
- return False
- return True
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_punctuation(character: str) -> bool:
- character_category = unicodedata.category(character) # type: str
-
- if "P" in character_category:
- return True
-
- character_range = unicode_range(character) # type: Optional[str]
-
- if character_range is None:
- return False
-
- return "Punctuation" in character_range
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_symbol(character: str) -> bool:
- character_category = unicodedata.category(character) # type: str
-
- if "S" in character_category or "N" in character_category:
- return True
-
- character_range = unicode_range(character) # type: Optional[str]
-
- if character_range is None:
- return False
-
- return "Forms" in character_range
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_emoticon(character: str) -> bool:
- character_range = unicode_range(character) # type: Optional[str]
-
- if character_range is None:
- return False
-
- return "Emoticons" in character_range
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_separator(character: str) -> bool:
- if character.isspace() or character in {"|", "+", ",", ";", "<", ">"}:
- return True
-
- character_category = unicodedata.category(character) # type: str
-
- return "Z" in character_category
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_case_variable(character: str) -> bool:
- return character.islower() != character.isupper()
-
-
-def is_private_use_only(character: str) -> bool:
- character_category = unicodedata.category(character) # type: str
-
- return character_category == "Co"
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_cjk(character: str) -> bool:
- try:
- character_name = unicodedata.name(character)
- except ValueError:
- return False
-
- return "CJK" in character_name
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_hiragana(character: str) -> bool:
- try:
- character_name = unicodedata.name(character)
- except ValueError:
- return False
-
- return "HIRAGANA" in character_name
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_katakana(character: str) -> bool:
- try:
- character_name = unicodedata.name(character)
- except ValueError:
- return False
-
- return "KATAKANA" in character_name
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_hangul(character: str) -> bool:
- try:
- character_name = unicodedata.name(character)
- except ValueError:
- return False
-
- return "HANGUL" in character_name
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_thai(character: str) -> bool:
- try:
- character_name = unicodedata.name(character)
- except ValueError:
- return False
-
- return "THAI" in character_name
-
-
-@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
-def is_unicode_range_secondary(range_name: str) -> bool:
- return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
-
-
-def any_specified_encoding(sequence: bytes, search_zone: int = 4096) -> Optional[str]:
- """
-    Extract, using an ASCII-only decoder, any specified encoding within the first n bytes.
- """
- if not isinstance(sequence, bytes):
- raise TypeError
-
- seq_len = len(sequence) # type: int
-
- results = findall(
- RE_POSSIBLE_ENCODING_INDICATION,
- sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
- ) # type: List[str]
-
- if len(results) == 0:
- return None
-
- for specified_encoding in results:
- specified_encoding = specified_encoding.lower().replace("-", "_")
-
- for encoding_alias, encoding_iana in aliases.items():
- if encoding_alias == specified_encoding:
- return encoding_iana
- if encoding_iana == specified_encoding:
- return encoding_iana
-
- return None
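(For example, an XML prolog declaring its own charset resolves through encodings.aliases to the canonical codec name.)

    from charset_normalizer.utils import any_specified_encoding

    any_specified_encoding(b'<?xml version="1.0" encoding="ISO-8859-1"?>')
    # -> 'latin_1' ("ISO-8859-1" normalized, then resolved via the aliases table)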
-
-
-@lru_cache(maxsize=128)
-def is_multi_byte_encoding(name: str) -> bool:
- """
-    Verify if a specific encoding is a multi-byte one based on its IANA name.
- """
- return name in {
- "utf_8",
- "utf_8_sig",
- "utf_16",
- "utf_16_be",
- "utf_16_le",
- "utf_32",
- "utf_32_le",
- "utf_32_be",
- "utf_7",
- } or issubclass(
- importlib.import_module("encodings.{}".format(name)).IncrementalDecoder, # type: ignore
- MultibyteIncrementalDecoder,
- )
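(Examples:)

    from charset_normalizer.utils import is_multi_byte_encoding

    is_multi_byte_encoding("utf_8")    # True, from the explicit set above
    is_multi_byte_encoding("latin_1")  # False
    is_multi_byte_encoding("big5")     # True; decoder subclasses MultibyteIncrementalDecoder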
-
-
-def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]:
- """
-    Identify and extract any SIG/BOM from the given sequence.
- """
-
- for iana_encoding in ENCODING_MARKS:
- marks = ENCODING_MARKS[iana_encoding] # type: Union[bytes, List[bytes]]
-
- if isinstance(marks, bytes):
- marks = [marks]
-
- for mark in marks:
- if sequence.startswith(mark):
- return iana_encoding, mark
-
- return None, b""
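(A sketch, assuming the ENCODING_MARKS table from constant.py lists the UTF-8 signature first.)

    from charset_normalizer.utils import identify_sig_or_bom

    identify_sig_or_bom("hello".encode("utf_8_sig"))  # ('utf_8', b'\xef\xbb\xbf')
    identify_sig_or_bom(b"no BOM here")               # (None, b'')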
-
-
-def should_strip_sig_or_bom(iana_encoding: str) -> bool:
- return iana_encoding not in {"utf_16", "utf_32"}
-
-
-def iana_name(cp_name: str, strict: bool = True) -> str:
- cp_name = cp_name.lower().replace("-", "_")
-
- for encoding_alias, encoding_iana in aliases.items():
- if cp_name in [encoding_alias, encoding_iana]:
- return encoding_iana
-
- if strict:
- raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name))
-
- return cp_name
-
-
-def range_scan(decoded_sequence: str) -> List[str]:
- ranges = set() # type: Set[str]
-
- for character in decoded_sequence:
- character_range = unicode_range(character) # type: Optional[str]
-
- if character_range is None:
- continue
-
- ranges.add(character_range)
-
- return list(ranges)
-
-
-def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
-
- if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
- return 0.0
-
- decoder_a = importlib.import_module("encodings.{}".format(iana_name_a)).IncrementalDecoder # type: ignore
- decoder_b = importlib.import_module("encodings.{}".format(iana_name_b)).IncrementalDecoder # type: ignore
-
- id_a = decoder_a(errors="ignore") # type: IncrementalDecoder
- id_b = decoder_b(errors="ignore") # type: IncrementalDecoder
-
- character_match_count = 0 # type: int
-
- for i in range(255):
- to_be_decoded = bytes([i]) # type: bytes
- if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
- character_match_count += 1
-
- return character_match_count / 254
-
-
-def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
- """
-    Determine if two code pages are at least 80% similar. The IANA_SUPPORTED_SIMILAR dict was generated using
- the function cp_similarity.
- """
- return (
- iana_name_a in IANA_SUPPORTED_SIMILAR
- and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
- )
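(Roughly: cp_similarity counts how many of the single-byte values 0x00-0xFE decode identically under both codecs, while is_cp_similar just consults the precomputed IANA_SUPPORTED_SIMILAR table. The figures below are approximate.)

    from charset_normalizer.utils import cp_similarity, is_cp_similar

    cp_similarity("cp1252", "latin_1")  # ~0.88; they differ mainly in 0x80-0x9F
    cp_similarity("utf_8", "latin_1")   # 0.0; multi-byte codecs short-circuit
    is_cp_similar("cp1252", "latin_1")  # True, via IANA_SUPPORTED_SIMILAR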
-
-
-def set_logging_handler(
- name: str = "charset_normalizer",
- level: int = logging.INFO,
- format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
-) -> None:
-
- logger = logging.getLogger(name)
- logger.setLevel(level)
-
- handler = logging.StreamHandler()
- handler.setFormatter(logging.Formatter(format_string))
- logger.addHandler(handler)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/version.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/version.py
deleted file mode 100644
index 77cfff25..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/version.py
+++ /dev/null
@@ -1,6 +0,0 @@
-"""
-Expose version
-"""
-
-__version__ = "2.0.12"
-VERSION = __version__.split(".")
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/METADATA
deleted file mode 100644
index 2bc50cfa..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/METADATA
+++ /dev/null
@@ -1,32 +0,0 @@
-Metadata-Version: 2.1
-Name: cloudconnectlib
-Version: 3.0.1b1
-Summary: APP Cloud Connect
-License: Apache-2.0
-Author: Addon Factory template
-Author-email: addonfactory@splunk.com
-Requires-Python: >=3.7,<4.0
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Requires-Dist: decorator (>=4,<5)
-Requires-Dist: future
-Requires-Dist: httplib2 (>=0.19.1,<0.20.0)
-Requires-Dist: jinja2 (>=2.11.3,<3.0.0)
-Requires-Dist: jsl (>=0.2.4,<0.3.0)
-Requires-Dist: jsonpath-ng (>=1.5.2,<2.0.0)
-Requires-Dist: jsonpath-rw (>=1.4.0,<2.0.0)
-Requires-Dist: jsonschema (>=3.2.0,<4.0.0)
-Requires-Dist: munch (>=2.3.2,<3.0.0)
-Requires-Dist: requests (>=2.25.1,<3.0.0)
-Requires-Dist: six
-Requires-Dist: solnlib (>=4.1.0,<5.0.0)
-Requires-Dist: sortedcontainers (>=2.3.0,<3.0.0)
-Requires-Dist: splunk-sdk (>=1.6,<2.0)
-Requires-Dist: splunktalib (>=2,<3)
-Requires-Dist: splunktaucclib (>=5,<6)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/RECORD
deleted file mode 100644
index b408d791..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/RECORD
+++ /dev/null
@@ -1,47 +0,0 @@
-cloudconnectlib-3.0.1b1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-cloudconnectlib-3.0.1b1.dist-info/METADATA,sha256=zIfypa7ib0nX4_BDQQuIEiF6dQ3zb-Q2nLW5z8sjrwM,1208
-cloudconnectlib-3.0.1b1.dist-info/RECORD,,
-cloudconnectlib-3.0.1b1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-cloudconnectlib-3.0.1b1.dist-info/WHEEL,sha256=V7iVckP-GYreevsTDnv1eAinQt_aArwnAxmnP0gygBY,83
-cloudconnectlib/__init__.py,sha256=LX0oPKTm3RdEe2ugCrqBoMIS9iQIS9lcBZ0hUqRObKY,773
-cloudconnectlib/client.py,sha256=KVdsm08ROyYSt0yFUcFrtxbsgEE7vzcpWleAtDSarSk,3180
-cloudconnectlib/common/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpgLTgIiV2wT4hx3Y,575
-cloudconnectlib/common/lib_util.py,sha256=4XEHpcFs2oOUylqa8KQp_90UQAXMCHIPz7VrwBjzsII,2115
-cloudconnectlib/common/log.py,sha256=3SF9F4IDjcpYaFrrMxgNNI4Yjja1AAFgtVzFGahMUpU,1896
-cloudconnectlib/common/util.py,sha256=cjb2ELvt4dxnCynxQxdvsLsMwBw7m22aT1NKRTRlccA,2058
-cloudconnectlib/configuration/__init__.py,sha256=vDmcGJjnxxkHIICiIOeK0FSJK_hxl3SJGHwdCDbYzMc,617
-cloudconnectlib/configuration/loader.py,sha256=ETALdhUdyOtafdnWQ7vkSRLsTOlzeXrCFYKJkGO9a4o,10699
-cloudconnectlib/configuration/schema_1_0_0.json,sha256=WKWbZJAyWu-rt-OLwvNoMo_UqBi3iwxHbIcRy0aSRy0,11645
-cloudconnectlib/core/__init__.py,sha256=02DdxhQvHpl-ncbu3B3GY1qvoWVHhhpSyYP4Gez9bxU,665
-cloudconnectlib/core/cacerts/ca_certs_locater.py,sha256=PtvX2_8dERSUnANIOTISBT8-QSyf6SOdDSFHH6Ls-og,4889
-cloudconnectlib/core/checkpoint.py,sha256=n45i3EPS8CNkJDIzmTQT0BNpmY8Fn6UNBdk76Wyacpo,1918
-cloudconnectlib/core/defaults.py,sha256=93J1wkfTVW4qVA0GjtDMGtPRhr8Uhd2_WZCdei5eOJI,1288
-cloudconnectlib/core/engine.py,sha256=nChmGutqzx0sfmd5-YaOfDNDXeujgeD5miSFTGNuV5U,10884
-cloudconnectlib/core/engine_v2.py,sha256=vXIhijoWY4p-tNYussNBvwFkIjXCaytdntf2yD_1jFg,4985
-cloudconnectlib/core/exceptions.py,sha256=GLDNVKann_M19kadClH7PLbcgDXaXRRowYUM4c7dWkY,1326
-cloudconnectlib/core/ext.py,sha256=CzhzOz8CFGKG4hbBci4vwmGBl3zI2qQDsr4AA9U_I9g,13104
-cloudconnectlib/core/http.py,sha256=zb7aJ4O2Vmkonqg-9azKcq86bxLwQHRpszHvK1nwRr8,10632
-cloudconnectlib/core/job.py,sha256=FRUs9KQeF-tFFIlBgkdBRNr0uDBvVsY19xpzhVTNtz4,4880
-cloudconnectlib/core/models.py,sha256=bPEMXKCv_UxsWGOvr-ypCQr_ie6DQaQrrzqWBPlmmE8,10190
-cloudconnectlib/core/pipemgr.py,sha256=KHnwa0O1rpnpFAgRS4Xgq5t1pU2Llc7xk6yJDtbd19s,1037
-cloudconnectlib/core/plugin.py,sha256=mI1u7OOfcB04stG_sk5ANUT-KJc8MgINk4RgTIQqVKU,3667
-cloudconnectlib/core/task.py,sha256=YL71H8B6SPoGkhOrodMoQpOwDSp8vIOztp3sGmFhDYs,18687
-cloudconnectlib/core/template.py,sha256=ROHW-eR7FKeyht5_cDlHZt-3f6VM4y4DfCe_FnjEWuo,1152
-cloudconnectlib/splunktacollectorlib/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpgLTgIiV2wT4hx3Y,575
-cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py,sha256=EWttmxmSVLNrwleV65Xf_v-ts6R-Gk3nSYWWOQ7xgFA,2990
-cloudconnectlib/splunktacollectorlib/common/__init__.py,sha256=J2HhI2PL-5WtAl4FYqQBbV7ru86w7FsmVXyFJz9xPlc,1625
-cloudconnectlib/splunktacollectorlib/common/log.py,sha256=SaiYUrl7UmzfqxhmefR-eKSfJ5JHfm_9mN5JwgVd2WU,1974
-cloudconnectlib/splunktacollectorlib/common/rwlock.py,sha256=pBLwQ9u4jcz4K419XW7Lp21zj5QyVpVnXmzZERdEglk,2119
-cloudconnectlib/splunktacollectorlib/common/schema_meta.py,sha256=p3MMcXfrLWkCgfdg_vnPrHIyrAJaAqkDX0gDA1NV1Vk,803
-cloudconnectlib/splunktacollectorlib/config.py,sha256=naUrLtDgMIFQyVyfsqFAtQJIMKL8oQ7MxjkbuLSsplQ,14724
-cloudconnectlib/splunktacollectorlib/data_collection/__init__.py,sha256=OSP5_M7Nh4SvlJT246Qfn-LRRhsVzhhY948E7ffHAKo,596
-cloudconnectlib/splunktacollectorlib/data_collection/ta_checkpoint_manager.py,sha256=erJ4KwUAsCuqOMOfW7Tkns5uI3Ksoe8hSGGDKVw2opc,6025
-cloudconnectlib/splunktacollectorlib/data_collection/ta_config.py,sha256=NKncmmObdJl-Nmd5DOWNS7DBgng3uiV2Xg7UCD1Z8jg,6859
-cloudconnectlib/splunktacollectorlib/data_collection/ta_consts.py,sha256=bvwnVB-1PJLK_FaLiaGpycpzQf4AmRujNvwy28OwQkI,1897
-cloudconnectlib/splunktacollectorlib/data_collection/ta_data_client.py,sha256=209EqJwpeymsAVS7GE2ZwFyPVf1KU9M7yS8VlItaLcA,3345
-cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py,sha256=EI3CPiNWfMFJ8j-Hcq0_9rXE-_WndYVXEOVRdVH6L0Y,5967
-cloudconnectlib/splunktacollectorlib/data_collection/ta_data_loader.py,sha256=RrY2LqAXmqkDtWzYgja4o0ADEBzCkDZy0NV0OYXAQpE,5714
-cloudconnectlib/splunktacollectorlib/data_collection/ta_helper.py,sha256=KMFAPwE8liOPW4sUancUiYJV6wNlbIQ77Uf--EmJLW0,5924
-cloudconnectlib/splunktacollectorlib/data_collection/ta_mod_input.py,sha256=qV530k2jo1mzzolb7-EJcwx2VBq8asZwzWzUlMUFvpA,8474
-cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py,sha256=LR4encL187WnyNPp9vtVd6PAZxMUjNCriOEwpN2gR40,1133
-cloudconnectlib/splunktacollectorlib/ta_cloud_connect_client.py,sha256=PgNr7HAeRz8l-B2HnBCHMdv_qAkXRflXAqkn9aTaXqo,2084
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/REQUESTED b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/REQUESTED
deleted file mode 100644
index e69de29b..00000000
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/WHEEL
deleted file mode 100644
index 862df1fb..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib-3.0.1b1.dist-info/WHEEL
+++ /dev/null
@@ -1,4 +0,0 @@
-Wheel-Version: 1.0
-Generator: poetry 1.0.3
-Root-Is-Purelib: true
-Tag: py3-none-any
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/__init__.py
index 1ba76347..ab2b02fe 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/__init__.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
"""
APP Cloud Connect
"""
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/client.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/client.py
index 22c93a8f..1807db47 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/client.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/client.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from builtins import object
import copy
import os.path
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/__init__.py
index 72d45097..e69de29b 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/__init__.py
@@ -1,15 +0,0 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/lib_util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/lib_util.py
index c1fee2e6..08014a49 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/lib_util.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/lib_util.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
import os
import os.path as op
import platform
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/log.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/log.py
index 907d56dd..f310e7d1 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/log.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/log.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
import logging
from solnlib.pattern import Singleton
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/util.py
index e072ac74..1d1133ea 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/util.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/common/util.py
@@ -1,20 +1,5 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
import json
-from splunktalib.common import util
+from ..splunktalib.common import util
from solnlib.modular_input.event import XMLEvent
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/configuration/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/configuration/__init__.py
index 65f309eb..4b1c2b18 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/configuration/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/configuration/__init__.py
@@ -1,16 +1 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from .loader import get_loader_by_version
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/configuration/loader.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/configuration/loader.py
index 2c59ec3b..deaed280 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/configuration/loader.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/configuration/loader.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from builtins import str
from builtins import object
import logging
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/__init__.py
index 3cd87a05..c2424fb1 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/__init__.py
@@ -1,17 +1,2 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from .engine import CloudConnectEngine
from .exceptions import ConfigException, HTTPError
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/cacerts/ca_certs_locater.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/cacerts/ca_certs_locater.py
index 44040b24..c05f8752 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/cacerts/ca_certs_locater.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/cacerts/ca_certs_locater.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
"""
`ca_certs_locater` is a lib for extending httplib2 to allow the system certificate store to be used when
verifying SSL certificates. To enable this lib, you should add it to your Python import path before
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/checkpoint.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/checkpoint.py
old mode 100644
new mode 100755
index 6aacd2c1..9312d37b
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/checkpoint.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/checkpoint.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
import cloudconnectlib.splunktacollectorlib.data_collection.ta_checkpoint_manager as tacm
from cloudconnectlib.common.log import get_cc_logger
from cloudconnectlib.core.models import _Token, DictToken
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/defaults.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/defaults.py
index 2144e869..84d79897 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/defaults.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/defaults.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
"""Default config for cloud connect"""
timeout = 120 # request timeout is two minutes
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/engine.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/engine.py
index 5e1da2d1..1ea24134 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/engine.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/engine.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from builtins import object
import threading
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/engine_v2.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/engine_v2.py
old mode 100644
new mode 100755
index a777d7ab..56999caf
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/engine_v2.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/engine_v2.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from builtins import object
import concurrent.futures as cf
import threading
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/exceptions.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/exceptions.py
index 87386828..039f2d41 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/exceptions.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/exceptions.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
"""APP Cloud Connect errors"""
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/ext.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/ext.py
index cc93735b..9773b94a 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/ext.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/ext.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from builtins import str
from builtins import range
import calendar
@@ -23,7 +8,7 @@
from datetime import datetime
import six
-from jsonpath_ng import parse
+from jsonpath_rw import parse
from .exceptions import FuncException, StopCCEIteration, QuitJobError
from .pipemgr import PipeManager
from ..common import util, log
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/http.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/http.py
index fb7d1ad6..761d0ce4 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/http.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/http.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from builtins import str
from builtins import range
from builtins import object
@@ -26,7 +11,7 @@
from cloudconnectlib.core import defaults
from cloudconnectlib.core.exceptions import HTTPError
from httplib2 import Http, socks, ProxyInfo
-from requests import PreparedRequest, utils
+from solnlib.packages.requests import PreparedRequest, utils
from solnlib.utils import is_true
try: # Python2 environment support
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/job.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/job.py
old mode 100644
new mode 100755
index 497c9f65..49c111b1
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/job.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/job.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from builtins import object
import threading
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/models.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/models.py
index 90854c8e..82e09004 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/models.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/models.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from builtins import str
from builtins import object
import base64
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/pipemgr.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/pipemgr.py
index d56615a1..97f2744b 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/pipemgr.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/pipemgr.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from __future__ import print_function
from builtins import object
from solnlib.pattern import Singleton
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/plugin.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/plugin.py
old mode 100644
new mode 100755
index d92ca2ef..5c4fc619
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/plugin.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/plugin.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from __future__ import absolute_import
from builtins import next
from .ext import _extension_functions
@@ -102,9 +87,7 @@ def init_pipeline_plugins(plugin_dir):
plugin_dir)
return
- if plugin_dir not in sys.path:
- sys.path.append(plugin_dir)
-
+ sys.path.append(plugin_dir)
for file_name in next(walk(plugin_dir))[2]:
if file_name == "__init__.py" or not file_name.startswith("cce_plugin_"):
continue
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/task.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/task.py
old mode 100644
new mode 100755
index 1b06a280..2a516664
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/task.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/task.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from builtins import object
import copy
import threading
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/template.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/template.py
index f4e16fcf..8cef6e90 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/template.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/template.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from jinja2 import Template
import re
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/__init__.py
index 72d45097..e69de29b 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/__init__.py
@@ -1,15 +0,0 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py
index 77f3ab61..380288e3 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from future import standard_library
standard_library.install_aliases()
import configparser
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/__init__.py
index 966cdf24..4230727e 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/__init__.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
import json
import hashlib
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/log.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/log.py
index 446e1ac8..4d1ecf03 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/log.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/log.py
@@ -1,20 +1,5 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
import logging
-from splunktalib.common import log as stclog
+from ...splunktalib.common import log as stclog
import six
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/rwlock.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/rwlock.py
index 1825619e..74b3063b 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/rwlock.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/rwlock.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
"""
This module provides Read-Write lock.
"""
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/schema_meta.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/schema_meta.py
index 7e981916..ec6ce889 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/schema_meta.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/common/schema_meta.py
@@ -1,18 +1,4 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
+
FIELD_PRODUCT = '_product'
FIELD_REST_NAMESPACE = '_rest_namespace'
FIELD_REST_PREFIX = '_rest_prefix'
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/config.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/config.py
index 5546a553..590ed1e5 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/config.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/config.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
"""UCC Config Module
This is for load/save configuration in UCC server or TA.
The load/save action is based on specified schema.
@@ -31,8 +16,8 @@
import time
import six
-from splunktalib.rest import splunkd_request, code_to_msg
-from splunktalib.common import util as sc_util
+from ..splunktalib.rest import splunkd_request, code_to_msg
+from ..splunktalib.common import util as sc_util
from .common import log as stulog
from .common import UCCException
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/__init__.py
index d40e9365..bb35ee15 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/__init__.py
@@ -1,16 +1 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
__version__ = "1.0.2"
\ No newline at end of file
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_checkpoint_manager.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_checkpoint_manager.py
index b5c56471..74bd80e6 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_checkpoint_manager.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_checkpoint_manager.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from builtins import object
import json
import re
@@ -20,8 +5,8 @@
from . import ta_consts as c
from . import ta_helper as th
from ..common import log as stulog
-from splunktalib import state_store as ss
-from splunktalib.common.util import is_true
+from ...splunktalib import state_store as ss
+from ...splunktalib.common.util import is_true
class TACheckPointMgr(object):
@@ -66,7 +51,8 @@ def _create_state_store(self, meta_config, storage_type, app_name):
return ss.get_state_store(
meta_config,
app_name,
- use_cached_store=use_cache_file
+ use_cache_file=use_cache_file,
+ max_cache_seconds=max_cache_seconds
)
def _get_collection_name(self):
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_config.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_config.py
index 6af27b51..25fd8431 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_config.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_config.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from __future__ import absolute_import
from builtins import object
import os.path as op
@@ -21,9 +6,9 @@
from . import ta_consts as c
from . import ta_helper as th
from ..common import log as stulog
-from splunktalib import modinput as modinput
-from splunktalib import splunk_cluster as sc
-from splunktalib.common import util
+from ...splunktalib import modinput as modinput
+from ...splunktalib import splunk_cluster as sc
+from ...splunktalib.common import util
# methods can be overridden by subclass: process_task_configs
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_consts.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_consts.py
index a1b05085..187ce0f7 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_consts.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_consts.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
server_uri = "server_uri"
session_key = "session_key"
version = "version"
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_client.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_client.py
index 83581668..f553ac67 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_client.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_client.py
@@ -1,19 +1,4 @@
#!/usr/bin/python
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from builtins import next
from builtins import object
from . import ta_checkpoint_manager as cp
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py
index d81cfd3f..2c2c4eed 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py
@@ -1,19 +1,4 @@
#!/usr/bin/python
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from __future__ import absolute_import
from builtins import object
import threading
@@ -22,7 +7,7 @@
from . import ta_consts as c
from ..common import log as stulog
-from splunktalib.common import util as scu
+from ...splunktalib.common import util as scu
evt_fmt = ("{0} "
" "
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_loader.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_loader.py
index ab4edd77..2a1eb848 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_loader.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_loader.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
"""
Data Loader main entry point
"""
@@ -25,10 +10,10 @@
import os.path as op
import configparser
-from splunktalib.concurrent import concurrent_executor as ce
-from splunktalib import timer_queue as tq
-from splunktalib.schedule import job as sjob
-from splunktalib.common import log
+from ...splunktalib.concurrent import concurrent_executor as ce
+from ...splunktalib import timer_queue as tq
+from ...splunktalib.schedule import job as sjob
+from ...splunktalib.common import log
class TADataLoader(object):
@@ -135,7 +120,7 @@ def get_event_writer(self):
@staticmethod
def _read_default_settings():
cur_dir = op.dirname(op.abspath(__file__))
- setting_file = op.join(cur_dir,"../../../","splunktalib", "setting.conf")
+ setting_file = op.join(cur_dir,"../../","splunktalib", "setting.conf")
parser = configparser.ConfigParser()
parser.read(setting_file)
settings = {}
@@ -177,8 +162,8 @@ def create_data_loader():
create a data loader with default event_writer, job_scheduler
"""
- from splunktalib import event_writer as ew
- from splunktalib.schedule import scheduler as sched
+ from ...splunktalib import event_writer as ew
+ from ...splunktalib.schedule import scheduler as sched
writer = ew.EventWriter()
scheduler = sched.Scheduler()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_helper.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_helper.py
index d3b8e3ca..e245f319 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_helper.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_helper.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
import six
from builtins import object
import hashlib
@@ -31,7 +16,7 @@
from splunktaucclib.global_config import GlobalConfig, GlobalConfigSchema
from . import ta_consts as c
from ...splunktacollectorlib import config as sc
-from splunktalib.common import util
+from ...splunktalib.common import util
def utc2timestamp(human_time):
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_mod_input.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_mod_input.py
index 55382de0..42c3711b 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_mod_input.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_mod_input.py
@@ -1,19 +1,4 @@
#!/usr/bin/python
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
"""
This is the main entry point for My TA
@@ -33,10 +18,10 @@
from ..common import load_schema_file as ld
from ..common import log as stulog
from ...common.lib_util import get_app_root_dir, get_mod_input_script_name
-from splunktalib import file_monitor as fm
-from splunktalib import modinput
-from splunktalib import orphan_process_monitor as opm
-from splunktalib.common import util as utils
+from ...splunktalib import file_monitor as fm
+from ...splunktalib import modinput
+from ...splunktalib import orphan_process_monitor as opm
+from ...splunktalib.common import util as utils
utils.remove_http_proxy_env_vars()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py
index 789afce1..7290cfd8 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
"""
This module is used to filter and reload PATH.
"""
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/ta_cloud_connect_client.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/ta_cloud_connect_client.py
index 3bd67b9d..4e5df1ca 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/ta_cloud_connect_client.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktacollectorlib/ta_cloud_connect_client.py
@@ -1,18 +1,3 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
from .data_collection.ta_data_client import TaDataClient
from ..splunktacollectorlib.common import log as stulog
from ..splunktacollectorlib.data_collection import ta_consts as c
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/__init__.py
new file mode 100755
index 00000000..46a5ea61
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/__init__.py
@@ -0,0 +1,2 @@
+__version__ = "0.9"
+__license__ = "Splunk"
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/cli/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/__init__.py
old mode 100644
new mode 100755
similarity index 100%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/charset_normalizer/cli/__init__.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/__init__.py
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/consts.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/consts.py
new file mode 100755
index 00000000..32fb4386
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/consts.py
@@ -0,0 +1 @@
+util_log = "util"
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/common/log.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/log.py
old mode 100644
new mode 100755
similarity index 52%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/common/log.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/log.py
index a8dd4234..be99a794
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/common/log.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/log.py
@@ -1,70 +1,48 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
"""
-Copyright (C) 2005-2019 Splunk Inc. All Rights Reserved.
+Copyright (C) 2005-2015 Splunk Inc. All Rights Reserved.
log utility for TA
"""
+from builtins import object
import logging
import logging.handlers as handlers
+import os
import os.path as op
-import time
-import warnings
-
-import splunktalib.common.util as cutil
-from splunktalib.common.pattern import singleton
-from splunktalib.splunk_platform import make_splunkhome_path
+from ..splunk_platform import make_splunkhome_path
+from . import util as cutil
+from .pattern import singleton
+import time
logging.Formatter.converter = time.gmtime
+__LOG_FORMAT__ = "%(asctime)s +0000 log_level=%(levelname)s, pid=%(process)d, " \
+ "tid=%(threadName)s, file=%(filename)s, " \
+ "func_name=%(funcName)s, code_line_no=%(lineno)d | %(message)s"
+
def log_enter_exit(logger):
"""
Log decorator to log function enter and exit
"""
- warnings.warn(
- "This function is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
-
def log_decorator(func):
def wrapper(*args, **kwargs):
logger.debug("{} entered.".format(func.__name__))
result = func(*args, **kwargs)
logger.debug("{} exited.".format(func.__name__))
return result
-
return wrapper
-
return log_decorator
+def check_add_stderr_handler():
+ env_var = os.environ.get('splunk.cloudconnect.settings.logging.type')
+ return env_var and env_var == "stderr"
+
@singleton
-class Logs:
+class Logs(object):
+
def __init__(self, namespace=None, default_level=logging.INFO):
- warnings.warn(
- "This class is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
self._loggers = {}
self._default_level = default_level
if namespace is None:
@@ -74,7 +52,8 @@ def __init__(self, namespace=None, default_level=logging.INFO):
namespace = namespace.lower()
self._namespace = namespace
- def get_logger(self, name, level=None, maxBytes=25000000, backupCount=5):
+ def get_logger(self, name, level=None,
+ maxBytes=25000000, backupCount=5):
"""
Set up a default logger.
@@ -92,25 +71,27 @@ def get_logger(self, name, level=None, maxBytes=25000000, backupCount=5):
if name in self._loggers:
return self._loggers[name]
- logfile = make_splunkhome_path(["var", "log", "splunk", name])
logger = logging.getLogger(name)
- handler_exists = any(
- [True for h in logger.handlers if h.baseFilename == logfile]
- )
- if not handler_exists:
- file_handler = handlers.RotatingFileHandler(
- logfile, mode="a", maxBytes=maxBytes, backupCount=backupCount
- )
-
- formatter = logging.Formatter(
- "%(asctime)s +0000 log_level=%(levelname)s, pid=%(process)d, tid=%(threadName)s, "
- "file=%(filename)s, func_name=%(funcName)s, code_line_no=%(lineno)d | %(message)s"
- )
- file_handler.setFormatter(formatter)
- logger.addHandler(file_handler)
- logger.setLevel(level)
- logger.propagate = False
+ if check_add_stderr_handler():
+ import sys
+ ch = logging.StreamHandler(sys.stderr)
+ ch.setLevel(logging.ERROR)
+ formatter = logging.Formatter(__LOG_FORMAT__)
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+ else:
+ logfile = make_splunkhome_path(["var", "log", "splunk", name])
+ handler_exists = any(
+ [True for h in logger.handlers if h.baseFilename == logfile])
+ if not handler_exists:
+ file_handler = handlers.RotatingFileHandler(
+ logfile, mode="a", maxBytes=maxBytes, backupCount=backupCount)
+ formatter = logging.Formatter(__LOG_FORMAT__ )
+ file_handler.setFormatter(formatter)
+ logger.addHandler(file_handler)
+ logger.setLevel(level)
+ logger.propagate = False
self._loggers[name] = logger
return logger
@@ -141,10 +122,9 @@ def _get_log_name(self, name):
if self._namespace:
name = "{}_{}.log".format(self._namespace, name)
else:
- name = "{}.log".format(name)
+ name = "{}.log" .format(name)
return name
-
# Global logger
logger = Logs().get_logger("util")
@@ -153,12 +133,6 @@ def reset_logger(name):
"""
Reset global logger.
"""
- warnings.warn(
- "This function is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
global logger
logger = Logs().get_logger(name)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/pattern.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/pattern.py
new file mode 100755
index 00000000..16b2bbcd
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/pattern.py
@@ -0,0 +1,38 @@
+"""
+Copyright (C) 2005-2015 Splunk Inc. All Rights Reserved.
+
+Commonly used design patterns for Python users, including:
+ - singleton (Decorator function used to build singleton)
+"""
+from __future__ import print_function
+
+from functools import wraps
+
+
+def singleton(class_):
+ """
+ Singleton decorator function.
+ """
+ instances = {}
+
+ @wraps(class_)
+ def getinstance(*args, **kwargs):
+ if class_ not in instances:
+ instances[class_] = class_(*args, **kwargs)
+ return instances[class_]
+ return getinstance
+
+
+class Singleton(type):
+ """
+ Singleton meta class
+ """
+
+ _instances = {}
+
+ def __call__(cls, *args, **kwargs):
+ if cls not in cls._instances:
+ cls._instances[cls] = super(Singleton, cls).__call__(
+ *args, **kwargs)
+ print(cls)
+ return cls._instances[cls]
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/common/util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/util.py
old mode 100644
new mode 100755
similarity index 52%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/common/util.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/util.py
index 6209b570..c04b7ebd
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/common/util.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/util.py
@@ -1,41 +1,19 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
"""
-Copyright (C) 2005-2019 Splunk Inc. All Rights Reserved.
+Copyright (C) 2005-2015 Splunk Inc. All Rights Reserved.
"""
-import datetime
-import gc
+from future import standard_library
+standard_library.install_aliases()
+from builtins import str
import os
import os.path as op
+import datetime
import sys
-import urllib.error
-import urllib.parse
-import urllib.request
-import warnings
+import gc
+import urllib.request, urllib.parse, urllib.error
def handle_tear_down_signals(callback):
- warnings.warn(
- "This function is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
import signal
signal.signal(signal.SIGTERM, callback)
@@ -46,23 +24,11 @@ def handle_tear_down_signals(callback):
def datetime_to_seconds(dt):
- warnings.warn(
- "This function is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
epoch_time = datetime.datetime.utcfromtimestamp(0)
return (dt - epoch_time).total_seconds()
def is_true(val):
- warnings.warn(
- "This function is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
value = str(val).strip().upper()
if value in ("1", "TRUE", "T", "Y", "YES"):
return True
@@ -70,12 +36,6 @@ def is_true(val):
def is_false(val):
- warnings.warn(
- "This function is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
value = str(val).strip().upper()
if value in ("0", "FALSE", "F", "N", "NO", "NONE", ""):
return True
@@ -83,12 +43,6 @@ def is_false(val):
def remove_http_proxy_env_vars():
- warnings.warn(
- "This function is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
for k in ("http_proxy", "https_proxy"):
if k in os.environ:
del os.environ[k]
@@ -112,11 +66,13 @@ def get_appname_from_path(absolute_path):
except IndexError:
pass
continue
+ #return None
return "-"
def escape_cdata(data):
- data = data.encode("utf-8", errors="xmlcharrefreplace").decode("utf-8")
+ # FIXME: This is a workaround for JIRA [addon-10459]
+ data = data.decode("utf-8", errors="replace").encode("utf-8", errors="xmlcharrefreplace").decode("utf-8")
data = data.replace("]]>", "]]>")
if data.endswith("]"):
data = data[:-1] + "%5D"
@@ -134,7 +90,15 @@ def extract_datainput_name(stanza_name):
except ValueError:
return stanza_name
- return stanza_name[idx + len(sep) :]
+ return stanza_name[idx + len(sep):]
+
+
+def escape_json_control_chars(json_str):
+ control_chars = ((r"\n", "\\\\n"), (r"\r", "\\\\r"),
+ (r"\r\n", "\\\\r\\\\n"))
+ for ch, replace in control_chars:
+ json_str = json_str.replace(ch, replace)
+ return json_str
def disable_stdout_buffer():
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/common/xml_dom_parser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/xml_dom_parser.py
old mode 100644
new mode 100755
similarity index 58%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/common/xml_dom_parser.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/xml_dom_parser.py
index 74df077f..ae5a571c
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/common/xml_dom_parser.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/common/xml_dom_parser.py
@@ -1,44 +1,27 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
import re
-
-from defusedxml import ElementTree as et
+try:
+ from xml.etree import cElementTree as et
+except ImportError:
+ # cElementTree was removed in Python 3.9
+ from xml.etree import ElementTree as et
def parse_conf_xml_dom(xml_content):
"""
@xml_content: XML DOM from splunkd
"""
- xml_content = xml_content.decode("utf-8")
+
m = re.search(r'xmlns="([^"]+)"', xml_content)
ns = m.group(1)
m = re.search(r'xmlns:s="([^"]+)"', xml_content)
sub_ns = m.group(1)
entry_path = "./{%s}entry" % ns
stanza_path = "./{%s}title" % ns
- key_path = "./{{{}}}content/{{{}}}dict/{{{}}}key".format(ns, sub_ns, sub_ns)
- meta_path = "./{{{}}}dict/{{{}}}key".format(sub_ns, sub_ns)
- list_path = "./{{{}}}list/{{{}}}item".format(sub_ns, sub_ns)
+ key_path = "./{%s}content/{%s}dict/{%s}key" % (ns, sub_ns, sub_ns)
+ meta_path = "./{%s}dict/{%s}key" % (sub_ns, sub_ns)
+ list_path = "./{%s}list/{%s}item" % (sub_ns, sub_ns)
xml_conf = et.fromstring(xml_content)
stanza_objs = []
for entry in xml_conf.iterfind(entry_path):
for stanza in entry.iterfind(stanza_path):
- stanza_obj = {"name": stanza.text, "stanza": stanza.text}
+ stanza_obj = {"name": stanza.text,"stanza": stanza.text}
break
else:
continue
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/bin/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/concurrent/__init__.py
old mode 100644
new mode 100755
similarity index 100%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/bin/__init__.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/concurrent/__init__.py
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/concurrent/concurrent_executor.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/concurrent/concurrent_executor.py
old mode 100644
new mode 100755
similarity index 70%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/concurrent/concurrent_executor.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/concurrent/concurrent_executor.py
index 6615f53e..2a921306
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/concurrent/concurrent_executor.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/concurrent/concurrent_executor.py
@@ -1,29 +1,15 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
"""
Concurrent executor provides concurrent executing function either in
a thread pool or a process pool
"""
-import splunktalib.concurrent.process_pool as pp
-import splunktalib.concurrent.thread_pool as tp
+from builtins import object
+from ..concurrent import thread_pool as tp
+from ..concurrent import process_pool as pp
+
+class ConcurrentExecutor(object):
-class ConcurrentExecutor:
def __init__(self, config):
"""
:param config: dict like object, contains thread_min_size (int),
@@ -31,15 +17,14 @@ def __init__(self, config):
process_size (int)
"""
- self._io_executor = tp.ThreadPool(
- config.get("thread_min_size", 0),
- config.get("thread_max_size", 0),
- config.get("task_queue_size", 1024),
- config.get("daemonize_thread", True),
- )
+ self._io_executor = tp.ThreadPool(config.get("thread_min_size", 0),
+ config.get("thread_max_size", 0),
+ config.get("task_queue_size", 1024),
+ config.get("daemonize_thread", True))
self._compute_executor = None
if config.get("process_size", 0):
- self._compute_executor = pp.ProcessPool(config.get("process_size", 0))
+ self._compute_executor = pp.ProcessPool(
+ config.get("process_size", 0))
def start(self):
self._io_executor.start()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/concurrent/process_pool.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/concurrent/process_pool.py
old mode 100644
new mode 100755
similarity index 66%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/concurrent/process_pool.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/concurrent/process_pool.py
index 6d9b4c74..282da0ef
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/concurrent/process_pool.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/concurrent/process_pool.py
@@ -1,29 +1,13 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
"""
A wrapper of multiprocessing.pool
"""
+from builtins import object
import multiprocessing
-
-from splunktalib.common import log
+from ..common import log
-class ProcessPool:
+class ProcessPool(object):
"""
A simple wrapper of multiprocessing.pool
"""
@@ -32,9 +16,8 @@ def __init__(self, size=0, maxtasksperchild=10000):
if size <= 0:
size = multiprocessing.cpu_count()
self.size = size
- self._pool = multiprocessing.Pool(
- processes=size, maxtasksperchild=maxtasksperchild
- )
+ self._pool = multiprocessing.Pool(processes=size,
+ maxtasksperchild=maxtasksperchild)
self._stopped = False
def tear_down(self):
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/concurrent/thread_pool.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/concurrent/thread_pool.py
old mode 100644
new mode 100755
similarity index 86%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/concurrent/thread_pool.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/concurrent/thread_pool.py
index 57c74e8d..b696b6b1
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/concurrent/thread_pool.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/concurrent/thread_pool.py
@@ -1,33 +1,22 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
"""
A simple thread pool implementation
"""
+from __future__ import division
-import multiprocessing
-import queue
+from future import standard_library
+standard_library.install_aliases()
+from builtins import range
+from builtins import object
import threading
+import queue
+import multiprocessing
import traceback
from time import time
-from splunktalib.common import log
+from ..common import log
-class ThreadPool:
+class ThreadPool(object):
"""
A simple thread pool implementation
"""
@@ -35,7 +24,8 @@ class ThreadPool:
_high_watermark = 0.2
_resize_window = 10
- def __init__(self, min_size=1, max_size=128, task_queue_size=1024, daemon=True):
+ def __init__(self, min_size=1, max_size=128,
+ task_queue_size=1024, daemon=True):
assert task_queue_size
if not min_size or min_size <= 0:
@@ -161,9 +151,8 @@ def resize(self, new_size):
return
if self._lock.locked() or not self._started:
- log.logger.info(
- "Try to resize thread pool during the tear " "down process, do nothing"
- )
+ log.logger.info("Try to resize thread pool during the tear "
+ "down process, do nothing")
return
with self._lock:
@@ -204,22 +193,16 @@ def _remove_exited_threads_with_lock(self):
self._thrs = live_thrs
def _do_resize_according_to_loads(self):
- if (
- self._last_resize_time
- and time() - self._last_resize_time < self._resize_window
- ):
+ if (self._last_resize_time and
+ time() - self._last_resize_time < self._resize_window):
return
thr_size = self._last_size
free_thrs = thr_size - self._occupied_threads
work_size = self._work_queue.qsize()
- log.logger.debug(
- "current_thr_size=%s, free_thrs=%s, work_size=%s",
- thr_size,
- free_thrs,
- work_size,
- )
+ log.logger.debug("current_thr_size=%s, free_thrs=%s, work_size=%s",
+ thr_size, free_thrs, work_size)
if work_size and work_size > free_thrs:
if thr_size < self._max_size:
thr_size = min(thr_size * 2, self._max_size)
@@ -228,7 +211,7 @@ def _do_resize_according_to_loads(self):
free = free_thrs * 1.0
if free / thr_size >= self._high_watermark and free_thrs >= 2:
# 20 % thrs are idle, tear down half of the idle ones
- thr_size = thr_size - int(free_thrs // 2)
+ thr_size = thr_size - int(free_thrs / 2)
if thr_size > self._min_size:
self.resize(thr_size)
self._last_resize_time = time()
@@ -247,9 +230,8 @@ def _do_admin(self):
break
else:
self._do_resize_according_to_loads()
- log.logger.info(
- "ThreadPool admin thread=%s stopped.", threading.current_thread().getName()
- )
+ log.logger.info("ThreadPool admin thread=%s stopped.",
+ threading.current_thread().getName())
def _run(self):
"""
@@ -282,12 +264,12 @@ def _run(self):
log.logger.debug("Done with exec job")
log.logger.info("Thread work_queue_size=%d", work_queue.qsize())
- log.logger.debug(
- "Worker thread %s stopped.", threading.current_thread().getName()
- )
+ log.logger.debug("Worker thread %s stopped.",
+ threading.current_thread().getName())
+
+class AsyncResult(object):
-class AsyncResult:
def __init__(self, func, args, kwargs, callback):
self._func = func
self._args = args
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/__init__.py
old mode 100644
new mode 100755
similarity index 100%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/__init__.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/__init__.py
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/conf_endpoints.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/conf_endpoints.py
old mode 100644
new mode 100755
similarity index 70%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/conf_endpoints.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/conf_endpoints.py
index ebda81cd..7a12f0cd
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/conf_endpoints.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/conf_endpoints.py
@@ -1,22 +1,7 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import splunktalib.common.util as util
-import splunktalib.common.xml_dom_parser as xdp
-from splunktalib.conf_manager.request import content_request
+import sys
+from .request import content_request
+from ..common import util
+from ..common import xml_dom_parser as xdp
CONF_ENDPOINT = "%s/servicesNS/%s/%s/configs/conf-%s"
@@ -35,7 +20,7 @@ def reload_conf(splunkd_uri, session_key, app_name, conf_name, throw=False):
uri = _conf_endpoint_ns(splunkd_uri, "nobody", app_name, conf_name)
uri += "/_reload"
- msg = "Failed to reload conf in app={}: {}".format(app_name, conf_name)
+ msg = "Failed to reload conf in app=%s: %s" % (app_name, conf_name)
try:
content_request(uri, session_key, "GET", None, msg)
@@ -44,9 +29,8 @@ def reload_conf(splunkd_uri, session_key, app_name, conf_name, throw=False):
raise
-def create_stanza(
- splunkd_uri, session_key, owner, app_name, conf_name, stanza, key_values
-):
+def create_stanza(splunkd_uri, session_key, owner, app_name, conf_name,
+ stanza, key_values):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -59,8 +43,12 @@ def create_stanza(
"""
uri = _conf_endpoint_ns(splunkd_uri, owner, app_name, conf_name)
- msg = "Failed to create stanza={} in conf={}".format(stanza, conf_name)
- payload = {"name": str(stanza).encode("utf-8")}
+ msg = "Failed to create stanza=%s in conf=%s" % (stanza, conf_name)
+ if sys.version_info < (3, 0):
+ payload = {"name": unicode(stanza).encode('utf-8')}
+ else:
+ payload = {"name": str(stanza)}
+
for key in key_values:
if key != "name":
payload[key] = str(key_values[key])
@@ -68,7 +56,8 @@ def create_stanza(
content_request(uri, session_key, "POST", payload, msg)
-def get_conf(splunkd_uri, session_key, owner, app_name, conf_name, stanza=None):
+def get_conf(splunkd_uri, session_key, owner, app_name, conf_name,
+ stanza=None):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -87,17 +76,13 @@ def get_conf(splunkd_uri, session_key, owner, app_name, conf_name, stanza=None):
# get all the stanzas at one time
uri += "?count=0&offset=0"
- msg = "Failed to get stanza={} in conf={}".format(
- stanza if stanza else stanza,
- conf_name,
- )
+ msg = "Failed to get stanza=%s in conf=%s" % (stanza if stanza else stanza, conf_name)
content = content_request(uri, session_key, "GET", None, msg)
return xdp.parse_conf_xml_dom(content)
-def update_stanza(
- splunkd_uri, session_key, owner, app_name, conf_name, stanza, key_values
-):
+def update_stanza(splunkd_uri, session_key, owner, app_name, conf_name,
+ stanza, key_values):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -111,13 +96,12 @@ def update_stanza(
uri = _conf_endpoint_ns(splunkd_uri, owner, app_name, conf_name)
uri += "/" + util.format_stanza_name(stanza)
- msg = "Failed to update stanza={} in conf={}".format(stanza, conf_name)
+ msg = "Failed to update stanza=%s in conf=%s" % (stanza, conf_name)
return content_request(uri, session_key, "POST", key_values, msg)
-def delete_stanza(
- splunkd_uri, session_key, owner, app_name, conf_name, stanza, throw=False
-):
+def delete_stanza(splunkd_uri, session_key, owner, app_name, conf_name,
+ stanza, throw=False):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -130,21 +114,22 @@ def delete_stanza(
uri = _conf_endpoint_ns(splunkd_uri, owner, app_name, conf_name)
uri += "/" + util.format_stanza_name(stanza)
- msg = "Failed to delete stanza={} in conf={}".format(stanza, conf_name)
+ msg = "Failed to delete stanza=%s in conf=%s" % (stanza, conf_name)
content_request(uri, session_key, "DELETE", None, msg)
-def stanza_exist(splunkd_uri, session_key, owner, app_name, conf_name, stanza):
+def stanza_exist(splunkd_uri, session_key, owner, app_name, conf_name,
+ stanza):
try:
- res = get_conf(splunkd_uri, session_key, owner, app_name, conf_name, stanza)
+ res = get_conf(splunkd_uri, session_key, owner, app_name, conf_name,
+ stanza)
return len(res) > 0
except Exception:
return False
-def operate_conf(
- splunkd_uri, session_key, owner, app_name, conf_name, stanza, operation
-):
+def operate_conf(splunkd_uri, session_key, owner, app_name, conf_name,
+ stanza, operation):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -157,6 +142,6 @@ def operate_conf(
assert operation in ("disable", "enable")
uri = _conf_endpoint_ns(splunkd_uri, owner, app_name, conf_name)
- uri += "/{}/{}".format(util.format_stanza_name(stanza), operation)
- msg = "Failed to disable/enable stanza={} in conf={}".format(stanza, conf_name)
+ uri += "/%s/%s" % (util.format_stanza_name(stanza), operation)
+ msg = "Failed to disable/enable stanza=%s in conf=%s" % (stanza, conf_name)
content_request(uri, session_key, "POST", None, msg)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/conf_manager.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/conf_manager.py
old mode 100644
new mode 100755
similarity index 54%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/conf_manager.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/conf_manager.py
index 3fea0c1c..fc5c5946
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/conf_manager.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/conf_manager.py
@@ -1,29 +1,14 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
"""
This module handles configuration related stuff
"""
+from builtins import object
import os.path as op
-import splunktalib.conf_manager.conf_endpoints as scmc
-import splunktalib.conf_manager.data_input_endpoints as scmdi
-import splunktalib.conf_manager.property_endpoints as scmp
-import splunktalib.conf_manager.request as req
+from . import conf_endpoints as scmc
+from . import data_input_endpoints as scmdi
+from . import property_endpoints as scmp
+from . import request as req
def conf_file2name(conf_file):
@@ -33,7 +18,8 @@ def conf_file2name(conf_file):
return conf_name
-class ConfManager:
+class ConfManager(object):
+
def __init__(self, splunkd_uri, session_key, owner="nobody", app_name="-"):
"""
:app_name: when creating conf stanza, app_name is required to set not
@@ -65,10 +51,12 @@ def all_stanzas(self, conf_name, do_reload=False, ret_metadata=False):
if do_reload:
self.reload_conf(conf_name)
- stanzas = scmc.get_conf(self.splunkd_uri, self.session_key, "-", "-", conf_name)
+ stanzas = scmc.get_conf(self.splunkd_uri, self.session_key,
+ "-", "-", conf_name)
return self._delete_metadata(stanzas, ret_metadata)
- def all_stanzas_as_dicts(self, conf_name, do_reload=False, ret_metadata=False):
+ def all_stanzas_as_dicts(self, conf_name, do_reload=False,
+ ret_metadata=False):
"""
:return: a dict of dict stanza objects if successful.
otherwise raise exception
@@ -77,7 +65,8 @@ def all_stanzas_as_dicts(self, conf_name, do_reload=False, ret_metadata=False):
stanzas = self.all_stanzas(conf_name, do_reload, ret_metadata)
return {stanza["name"]: stanza for stanza in stanzas}
- def get_stanza(self, conf_name, stanza, do_reload=False, ret_metadata=False):
+ def get_stanza(self, conf_name, stanza,
+ do_reload=False, ret_metadata=False):
"""
@return dict if success otherwise raise exception
"""
@@ -85,9 +74,8 @@ def get_stanza(self, conf_name, stanza, do_reload=False, ret_metadata=False):
if do_reload:
self.reload_conf(conf_name)
- stanzas = scmc.get_conf(
- self.splunkd_uri, self.session_key, "-", "-", conf_name, stanza
- )
+ stanzas = scmc.get_conf(self.splunkd_uri, self.session_key,
+ "-", "-", conf_name, stanza)
stanzas = self._delete_metadata(stanzas, ret_metadata)
return stanzas[0]
@@ -95,92 +83,50 @@ def reload_conf(self, conf_name):
scmc.reload_conf(self.splunkd_uri, self.session_key, "-", conf_name)
def enable_conf(self, conf_name, stanza):
- scmc.operate_conf(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- conf_name,
- stanza,
- "enable",
- )
+ scmc.operate_conf(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ conf_name, stanza, "enable")
def disable_conf(self, conf_name, stanza):
- scmc.operate_conf(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- conf_name,
- stanza,
- "disable",
- )
+ scmc.operate_conf(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ conf_name, stanza, "disable")
def get_property(self, conf_name, stanza, key, do_reload=False):
if do_reload:
self.reload_conf(conf_name)
- return scmp.get_property(
- self.splunkd_uri, self.session_key, "-", "-", conf_name, stanza, key
- )
+ return scmp.get_property(self.splunkd_uri, self.session_key,
+ "-", "-", conf_name, stanza, key)
def stanza_exist(self, conf_name, stanza):
- return scmc.stanza_exist(
- self.splunkd_uri, self.session_key, "-", "-", conf_name, stanza
- )
+ return scmc.stanza_exist(self.splunkd_uri, self.session_key,
+ "-", "-", conf_name, stanza)
def create_stanza(self, conf_name, stanza, key_values):
- scmc.create_stanza(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- conf_name,
- stanza,
- key_values,
- )
+ scmc.create_stanza(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ conf_name, stanza, key_values)
def update_stanza(self, conf_name, stanza, key_values):
- scmc.update_stanza(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- conf_name,
- stanza,
- key_values,
- )
+ scmc.update_stanza(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ conf_name, stanza, key_values)
def delete_stanza(self, conf_name, stanza):
- scmc.delete_stanza(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- conf_name,
- stanza,
- )
+ scmc.delete_stanza(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ conf_name, stanza)
def create_properties(self, conf_name, stanza):
- scmp.create_properties(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- conf_name,
- stanza,
- )
+ scmp.create_properties(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ conf_name, stanza)
def update_properties(self, conf_name, stanza, key_values):
- scmp.update_properties(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- conf_name,
- stanza,
- key_values,
- )
+ scmp.update_properties(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ conf_name, stanza, key_values)
def delete_stanzas(self, conf_name, stanzas):
"""
@@ -198,71 +144,40 @@ def delete_stanzas(self, conf_name, stanzas):
# data input management
def create_data_input(self, input_type, name, key_values=None):
- scmdi.create_data_input(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- input_type,
- name,
- key_values,
- )
+ scmdi.create_data_input(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ input_type, name, key_values)
def update_data_input(self, input_type, name, key_values):
- scmdi.update_data_input(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- input_type,
- name,
- key_values,
- )
+ scmdi.update_data_input(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ input_type, name, key_values)
def delete_data_input(self, input_type, name):
- scmdi.delete_data_input(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- input_type,
- name,
- )
+ scmdi.delete_data_input(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ input_type, name)
def get_data_input(self, input_type, name=None, do_reload=False):
if do_reload:
self.reload_data_input(input_type)
- return scmdi.get_data_input(
- self.splunkd_uri, self.session_key, "-", "-", input_type, name
- )
+ return scmdi.get_data_input(self.splunkd_uri, self.session_key,
+ "-", "-", input_type, name)
def reload_data_input(self, input_type):
- scmdi.reload_data_input(
- self.splunkd_uri, self.session_key, "-", "-", input_type
- )
+ scmdi.reload_data_input(self.splunkd_uri, self.session_key,
+ "-", "-", input_type)
def enable_data_input(self, input_type, name):
- scmdi.operate_data_input(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- input_type,
- name,
- "enable",
- )
+ scmdi.operate_data_input(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ input_type, name, "enable")
def disable_data_input(self, input_type, name):
- scmdi.operate_data_input(
- self.splunkd_uri,
- self.session_key,
- self.owner,
- self.app_name,
- input_type,
- name,
- "disable",
- )
+ scmdi.operate_data_input(self.splunkd_uri, self.session_key,
+ self.owner, self.app_name,
+ input_type, name, "disable")
def data_input_exist(self, input_type, name):
try:
@@ -272,7 +187,8 @@ def data_input_exist(self, input_type, name):
return result is not None
- def all_data_input_stanzas(self, input_type, do_reload=False, ret_metadata=False):
+ def all_data_input_stanzas(self, input_type, do_reload=False,
+ ret_metadata=False):
stanzas = self.get_data_input(input_type, do_reload=do_reload)
for stanza in stanzas:
if "eai:acl" in stanza and "app" in stanza["eai:acl"]:
@@ -280,9 +196,8 @@ def all_data_input_stanzas(self, input_type, do_reload=False, ret_metadata=False
stanza["userName"] = stanza["eai:acl"].get("owner", "nobody")
return self._delete_metadata(stanzas, ret_metadata)
- def get_data_input_stanza(
- self, input_type, name, do_reload=False, ret_metadata=False
- ):
+ def get_data_input_stanza(self, input_type, name, do_reload=False,
+ ret_metadata=False):
stanzas = self.get_data_input(input_type, name, do_reload)
stanzas = self._delete_metadata(stanzas, ret_metadata)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/data_input_endpoints.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/data_input_endpoints.py
old mode 100644
new mode 100755
similarity index 71%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/data_input_endpoints.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/data_input_endpoints.py
index a17ab725..c58a853b
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/data_input_endpoints.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/data_input_endpoints.py
@@ -1,22 +1,7 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import splunktalib.common.util as util
-import splunktalib.common.xml_dom_parser as xdp
-from splunktalib.conf_manager.request import content_request
+import sys
+from .request import content_request
+from ..common import util
+from ..common import xml_dom_parser as xdp
INPUT_ENDPOINT = "%s/servicesNS/%s/%s/data/inputs/%s"
@@ -25,9 +10,8 @@ def _input_endpoint_ns(uri, owner, app, input_type):
return INPUT_ENDPOINT % (uri, owner, app, input_type)
-def reload_data_input(
- splunkd_uri, session_key, owner, app_name, input_type, throw=False
-):
+def reload_data_input(splunkd_uri, session_key, owner, app_name,
+ input_type, throw=False):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -40,7 +24,7 @@ def reload_data_input(
uri = _input_endpoint_ns(splunkd_uri, owner, app_name, input_type)
uri += "/_reload"
- msg = "Failed to reload data input in app={}: {}".format(app_name, input_type)
+ msg = "Failed to reload data input in app=%s: %s" % (app_name, input_type)
try:
content_request(uri, session_key, "GET", None, msg)
except Exception:
@@ -48,9 +32,8 @@ def reload_data_input(
raise
-def create_data_input(
- splunkd_uri, session_key, owner, app_name, input_type, name, key_values
-):
+def create_data_input(splunkd_uri, session_key, owner, app_name, input_type,
+ name, key_values):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -64,18 +47,19 @@ def create_data_input(
:param key_values: a K-V dict of details in the data input stanza.
:return: None on success else raise exception
"""
+ if sys.version_info < (3, 0):
+ key_values["name"] = unicode(name).encode('utf-8')
+ else:
+ key_values["name"] = str(name)
- key_values["name"] = str(name).encode("utf-8")
uri = _input_endpoint_ns(splunkd_uri, owner, app_name, input_type)
- msg = "Failed to create data input in app={}: {}://{}".format(
- app_name,
- input_type,
- name,
- )
+ msg = "Failed to create data input in app=%s: %s://%s" % (
+ app_name, input_type, name)
content_request(uri, session_key, "POST", key_values, msg)
-def get_data_input(splunkd_uri, session_key, owner, app_name, input_type, name=None):
+def get_data_input(splunkd_uri, session_key, owner, app_name, input_type,
+ name=None):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -96,18 +80,14 @@ def get_data_input(splunkd_uri, session_key, owner, app_name, input_type, name=N
# get all the stanzas at one time
uri += "?count=0&offset=0"
- msg = "Failed to get data input in app={}: {}://{}".format(
- app_name,
- input_type,
- name if name else name,
- )
+ msg = "Failed to get data input in app=%s: %s://%s" % (
+ app_name, input_type, name)
content = content_request(uri, session_key, "GET", None, msg)
return xdp.parse_conf_xml_dom(content)
-def update_data_input(
- splunkd_uri, session_key, owner, app_name, input_type, name, key_values
-):
+def update_data_input(splunkd_uri, session_key, owner, app_name, input_type,
+ name, key_values):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -126,15 +106,13 @@ def update_data_input(
del key_values["name"]
uri = _input_endpoint_ns(splunkd_uri, owner, app_name, input_type)
uri += "/" + util.format_stanza_name(name)
- msg = "Failed to update data input in app={}: {}://{}".format(
- app_name,
- input_type,
- name,
- )
+ msg = "Failed to update data input in app=%s: %s://%s" % (
+ app_name, input_type, name)
content_request(uri, session_key, "POST", key_values, msg)
-def delete_data_input(splunkd_uri, session_key, owner, app_name, input_type, name):
+def delete_data_input(splunkd_uri, session_key, owner, app_name, input_type,
+ name):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -150,17 +128,13 @@ def delete_data_input(splunkd_uri, session_key, owner, app_name, input_type, nam
uri = _input_endpoint_ns(splunkd_uri, owner, app_name, input_type)
uri += "/" + util.format_stanza_name(name)
- msg = "Failed to delete data input in app={}: {}://{}".format(
- app_name,
- input_type,
- name,
- )
+ msg = "Failed to delete data input in app=%s: %s://%s" % (
+ app_name, input_type, name)
content_request(uri, session_key, "DELETE", None, msg)
-def operate_data_input(
- splunkd_uri, session_key, owner, app_name, input_type, name, operation
-):
+def operate_data_input(splunkd_uri, session_key, owner, app_name,
+ input_type, name, operation):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -176,11 +150,7 @@ def operate_data_input(
assert operation in ("disable", "enable")
uri = _input_endpoint_ns(splunkd_uri, owner, app_name, input_type)
- uri += "/{}/{}".format(util.format_stanza_name(name), operation)
- msg = "Failed to {} data input in app={}: {}://{}".format(
- operation,
- app_name,
- input_type,
- name,
- )
+ uri += "/%s/%s" % (util.format_stanza_name(name), operation)
+ msg = "Failed to %s data input in app=%s: %s://%s" % (
+ operation, app_name, input_type, name)
content_request(uri, session_key, "POST", None, msg)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/knowledge_objects.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/knowledge_objects.py
new file mode 100755
index 00000000..71d550d4
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/knowledge_objects.py
@@ -0,0 +1,37 @@
+from builtins import object
+from . import request as req
+from ..common import xml_dom_parser as xdp
+
+
+class KnowledgeObjectManager(object):
+
+ def __init__(self, splunkd_uri, session_key):
+ self.splunkd_uri = splunkd_uri
+ self.session_key = session_key
+
+ def apps(self):
+ """
+ @return: a list of dicts containing apps if successful,
+ otherwise raise exceptions
+ """
+
+ uri = "{}/services/apps/local?count=0&offset=0".format(
+ self.splunkd_uri)
+ apps = self._do_request(uri, "GET", None, "Failed to get apps")
+ return apps
+
+ def indexes(self):
+ """
+ @return: a list of dicts containing indexes if successful,
+ otherwise raise exceptions
+ """
+
+ uri = "{}/services/data/indexes/?count=0&offset=0".format(
+ self.splunkd_uri)
+ indexes = self._do_request(uri, "GET", None, "Failed to get indexes")
+ return indexes
+
+ def _do_request(self, uri, method, payload, err_msg):
+ # content_request() returns the response content directly,
+ # so there is no response/content pair to unpack here
+ content = req.content_request(uri, self.session_key, method,
+ payload, err_msg)
+ return xdp.parse_conf_xml_dom(content)
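+
+
+# A minimal usage sketch (the splunkd URI and session key below are
+# placeholders):
+#
+# kom = KnowledgeObjectManager("https://127.0.0.1:8089", session_key)
+# app_names = [app["name"] for app in kom.apps()]
+# index_names = [idx["name"] for idx in kom.indexes()]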
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/property_endpoints.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/property_endpoints.py
old mode 100644
new mode 100755
similarity index 65%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/property_endpoints.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/property_endpoints.py
index b28e2e5b..a7249a33
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/property_endpoints.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/property_endpoints.py
@@ -1,21 +1,6 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import splunktalib.common.util as util
-from splunktalib.conf_manager.request import content_request
+from .request import content_request
+from ..common import util
+
PROPERTY_ENDPOINT = "%s/servicesNS/%s/%s/properties/%s"
@@ -24,7 +9,8 @@ def _property_endpoint_ns(uri, owner, app, conf_name):
return PROPERTY_ENDPOINT % (uri, owner, app, conf_name)
-def create_properties(splunkd_uri, session_key, owner, app_name, conf_name, stanza):
+def create_properties(splunkd_uri, session_key, owner, app_name, conf_name,
+ stanza):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -36,12 +22,14 @@ def create_properties(splunkd_uri, session_key, owner, app_name, conf_name, stan
"""
uri = _property_endpoint_ns(splunkd_uri, owner, app_name, conf_name)
- msg = "Properties: failed to create stanza={} in conf={}".format(stanza, conf_name)
+ msg = "Properties: failed to create stanza=%s in conf=%s" % \
+ (stanza, conf_name)
payload = {"__stanza": stanza}
content_request(uri, session_key, "POST", payload, msg)
-def get_property(splunkd_uri, session_key, owner, app_name, conf_name, stanza, key):
+def get_property(splunkd_uri, session_key, owner, app_name, conf_name,
+ stanza, key):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -54,18 +42,14 @@ def get_property(splunkd_uri, session_key, owner, app_name, conf_name, stanza, k
"""
uri = _property_endpoint_ns(splunkd_uri, owner, app_name, conf_name)
- uri += "/{}/{}".format(util.format_stanza_name(stanza), key)
- msg = "Properties: failed to get conf={}, stanza={}, key={}".format(
- conf_name,
- stanza,
- key,
- )
+ uri += "/%s/%s" % (util.format_stanza_name(stanza), key)
+ msg = "Properties: failed to get conf=%s, stanza=%s, key=%s" % \
+ (conf_name, stanza, key)
return content_request(uri, session_key, "GET", None, msg)
-def update_properties(
- splunkd_uri, session_key, owner, app_name, conf_name, stanza, key_values
-):
+def update_properties(splunkd_uri, session_key, owner, app_name, conf_name,
+ stanza, key_values):
"""
:param splunkd_uri: splunkd uri, e.g. https://127.0.0.1:8089
:param session_key: splunkd session key
@@ -79,7 +63,8 @@ def update_properties(
uri = _property_endpoint_ns(splunkd_uri, owner, app_name, conf_name)
uri += "/" + util.format_stanza_name(stanza)
- msg = "Properties: failed to update conf={}, stanza={}".format(conf_name, stanza)
+ msg = "Properties: failed to update conf=%s, stanza=%s" % \
+ (conf_name, stanza)
has_name = False
if "name" in key_values:
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/request.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/request.py
new file mode 100755
index 00000000..f837173a
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/request.py
@@ -0,0 +1,44 @@
+from .. import rest
+from ..common import log
+
+
+class ConfRequestException(Exception):
+ pass
+
+
+class ConfNotExistsException(ConfRequestException):
+ pass
+
+
+class ConfExistsException(ConfRequestException):
+ pass
+
+
+def content_request(uri, session_key, method, payload, err_msg):
+ """
+ :return: response content if successful otherwise raise
+ ConfRequestException
+ """
+
+ resp, content = rest.splunkd_request(uri, session_key, method,
+ data=payload, retry=3)
+ if resp is None and content is None:
+ return None
+
+ if resp.status >= 200 and resp.status <= 204:
+ return content
+ else:
+ msg = "%s, status=%s, reason=%s, detail=%s" % (
+ err_msg, resp.status, resp.reason, content.decode('utf-8'))
+
+ if not (method == "GET" and resp.status == 404):
+ log.logger.error(msg)
+
+ if resp.status == 404:
+ raise ConfNotExistsException(msg)
+ if resp.status == 409:
+ raise ConfExistsException(msg)
+ else:
+ if content and "already exists" in content:
+ raise ConfExistsException(msg)
+ raise ConfRequestException(msg)
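+
+
+# A minimal usage sketch (the URI and session key are placeholders):
+#
+# try:
+#     content = content_request(
+#         "https://127.0.0.1:8089/servicesNS/nobody/-/configs/conf-inputs",
+#         session_key, "GET", None, "Failed to read conf-inputs")
+# except ConfNotExistsException:
+#     content = None  # conf file or stanza does not exist yet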
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/ta_conf_manager.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/ta_conf_manager.py
old mode 100644
new mode 100755
similarity index 80%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/ta_conf_manager.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/ta_conf_manager.py
index bc51242f..cbb646a1
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/conf_manager/ta_conf_manager.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/conf_manager/ta_conf_manager.py
@@ -1,33 +1,18 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
"""
This module handles high-level TA configuration related stuff
"""
+from builtins import object
import copy
import os.path as op
-import splunktalib.common.util as utils
-import splunktalib.conf_manager.conf_manager as conf
-import splunktalib.conf_manager.request as conf_req
-import splunktalib.credentials as cred
+from . import conf_manager as conf
+from . import request as conf_req
+from .. import credentials as cred
+from ..common import util as utils
-class TAConfManager:
+class TAConfManager(object):
encrypted_token = "******"
reserved_keys = ("userName", "appName")
@@ -36,10 +21,11 @@ def __init__(self, conf_file, splunkd_uri, session_key, appname=None):
if appname is None:
appname = utils.get_appname_from_path(op.abspath(__file__))
self._conf_file = conf.conf_file2name(conf_file)
- self._conf_mgr = conf.ConfManager(splunkd_uri, session_key, app_name=appname)
+ self._conf_mgr = conf.ConfManager(splunkd_uri, session_key,
+ app_name=appname)
self._cred_mgr = cred.CredentialManager(
- splunkd_uri, session_key, app=appname, owner="nobody", realm=appname
- )
+ splunkd_uri, session_key, app=appname,
+ owner="nobody", realm=appname)
self._keys = None
def set_appname(self, appname):
@@ -61,7 +47,7 @@ def _delete_reserved_keys(self, stanza):
def create(self, stanza):
"""
- @stanza: dict like object
+ @stanza: dict like object
{
"name": xxx,
"k1": v1,
@@ -73,13 +59,13 @@ def create(self, stanza):
stanza = self._delete_reserved_keys(stanza)
encrypted_stanza = self._encrypt(stanza)
- self._conf_mgr.create_stanza(
- self._conf_file, encrypted_stanza["name"], encrypted_stanza
- )
+ self._conf_mgr.create_stanza(self._conf_file,
+ encrypted_stanza["name"],
+ encrypted_stanza)
def update(self, stanza):
"""
- @stanza: dict like object
+ @stanza: dict like object
{
"name": xxx,
"k1": v1,
@@ -96,8 +82,7 @@ def update(self, stanza):
stanza = self._delete_reserved_keys(stanza)
encrypted_stanza = self._encrypt(stanza)
self._conf_mgr.update_properties(
- self._conf_file, encrypted_stanza["name"], encrypted_stanza
- )
+ self._conf_file, encrypted_stanza["name"], encrypted_stanza)
def delete(self, stanza_name):
"""
@@ -117,9 +102,8 @@ def get(self, stanza_name, return_acl=False):
@return: dict object if successful, otherwise raise exception
"""
- stanza = self._conf_mgr.get_stanza(
- self._conf_file, stanza_name, ret_metadata=return_acl
- )
+ stanza = self._conf_mgr.get_stanza(self._conf_file, stanza_name,
+ ret_metadata=return_acl)
stanza = self._decrypt(stanza)
stanza["disabled"] = utils.is_true(stanza.get("disabled"))
return stanza
@@ -131,7 +115,8 @@ def all(self, filter_disabled=False, return_acl=True):
"""
results = {}
- stanzas = self._conf_mgr.all_stanzas(self._conf_file, ret_metadata=return_acl)
+ stanzas = self._conf_mgr.all_stanzas(self._conf_file,
+ ret_metadata=return_acl)
for stanza in stanzas:
stanza = self._decrypt(stanza)
stanza["disabled"] = utils.is_true(stanza.get("disabled"))
@@ -202,7 +187,8 @@ def _decrypt(self, stanza):
clear_password = None
for key in self._keys:
if key in stanza and stanza[key] == self.encrypted_token:
- clear_password = self._cred_mgr.get_clear_password(stanza_name)
+ clear_password = self._cred_mgr.get_clear_password(
+ stanza_name)
break
if clear_password:
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/credentials.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/credentials.py
old mode 100644
new mode 100755
similarity index 64%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/credentials.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/credentials.py
index 13e742e7..96fae1f8
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/credentials.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/credentials.py
@@ -1,31 +1,16 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
"""
Handles credentials related stuff
"""
+from builtins import str
+from builtins import range
+from builtins import object
import re
-import warnings
-
-import defusedxml.minidom as xdm
+import xml.dom.minidom as xdm
-import splunktalib.common.util as util
-import splunktalib.common.xml_dom_parser as xdp
-import splunktalib.rest as rest
+from . import rest
+from .common import util
+from .common import xml_dom_parser as xdp
# Splunk can only encrypt string when length <=255
SPLUNK_CRED_LEN_LIMIT = 255
@@ -39,36 +24,26 @@ class CredNotFound(CredException):
"""
Credential information does not exist
"""
-
pass
-def create_credential_manager(username, password, splunkd_uri, app, owner, realm):
- warnings.warn(
- "This function is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
- session_key = CredentialManager.get_session_key(username, password, splunkd_uri)
+def create_credential_manager(username, password, splunkd_uri,
+ app, owner, realm):
+ session_key = CredentialManager.get_session_key(
+ username, password, splunkd_uri)
return CredentialManager(splunkd_uri, session_key, app, owner, realm)
-class CredentialManager:
+class CredentialManager(object):
"""
Credential related interfaces
"""
- def __init__(self, splunkd_uri, session_key, app="-", owner="nobody", realm=None):
+ def __init__(self, splunkd_uri, session_key,
+ app="-", owner="nobody", realm=None):
"""
:app: when creating/updating/deleting, app is required
"""
- warnings.warn(
- "This class is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
self._app = app
self._splunkd_uri = splunkd_uri
@@ -92,7 +67,8 @@ def set_appname(self, app):
self._app = app
@staticmethod
- def get_session_key(username, password, splunkd_uri="https://localhost:8089"):
+ def get_session_key(username, password,
+ splunkd_uri="https://localhost:8089"):
"""
Get session key by using login username and password
:return: session_key if successful, None if failed
@@ -105,8 +81,7 @@ def get_session_key(username, password, splunkd_uri="https://localhost:8089"):
}
response, content = rest.splunkd_request(
- eid, None, method="POST", data=postargs
- )
+ eid, None, method="POST", data=postargs)
if response is None and content is None:
raise CredException("Get session key failed.")
@@ -144,37 +119,23 @@ def _update(self, name, str_to_encrypt):
:return: raise on failure
"""
+ self.delete(name)
+
if len(str_to_encrypt) <= SPLUNK_CRED_LEN_LIMIT:
- self._do_update(name, str_to_encrypt)
+ self._create(name, str_to_encrypt)
return
# split the str_to_encrypt when len > 255
length = SPLUNK_CRED_LEN_LIMIT
i = 0
while length < len(str_to_encrypt) + SPLUNK_CRED_LEN_LIMIT:
- curr_str = str_to_encrypt[length - SPLUNK_CRED_LEN_LIMIT : length]
+ curr_str = str_to_encrypt[length - SPLUNK_CRED_LEN_LIMIT:length]
length += SPLUNK_CRED_LEN_LIMIT
stanza_name = self._sep.join((name, str(i)))
- self._do_update(stanza_name, curr_str)
+ self._create(stanza_name, curr_str)
i += 1
- def _do_update(self, name, password):
- try:
- self._create(name, password)
- except CredException:
- payload = {"password": password}
- endpoint = self._get_endpoint(name)
- response, _ = rest.splunkd_request(
- endpoint, self._session_key, method="POST", data=payload
- )
- if not response or response.status not in (200, 201):
- raise CredException(
- "Unable to update password for username={}, status={}".format(
- name, response.status
- )
- )
-
def _create(self, name, str_to_encrypt):
"""
Create a new stored credential.
@@ -188,9 +149,8 @@ def _create(self, name, str_to_encrypt):
}
endpoint = self._get_endpoint(name)
- resp, content = rest.splunkd_request(
- endpoint, self._session_key, method="POST", data=payload
- )
+ resp, content = rest.splunkd_request(endpoint, self._session_key,
+ method="POST", data=payload)
if not resp or resp.status not in (200, 201, "200", "201"):
raise CredException("Failed to encrypt username {}".format(name))
@@ -208,20 +168,14 @@ def delete(self, name, throw=False):
except Exception:
raise
- prefix = self._realm + ":" + name + self._sep
+ ent_regx = "%s:(%s%s\d+):" % (self._realm, name, self._sep)
+ ent_pattern = re.compile(ent_regx)
for stanza in stanzas:
stanza_name = stanza.get("name")
- match = True
- try:
- if stanza_name[: len(prefix)] != prefix:
- match = False
- num = stanza_name[len(prefix) : -1]
- int(num)
- except (IndexError, ValueError):
- match = False
+ match = ent_pattern.match(stanza_name)
if match:
try:
- delete_name = name + self._sep + num
+ delete_name = match.group(1)
self._delete(delete_name, throw=True)
except CredNotFound:
pass
@@ -237,17 +191,16 @@ def _delete(self, name, throw=False):
endpoint = self._get_endpoint(name)
response, content = rest.splunkd_request(
- endpoint, self._session_key, method="DELETE"
- )
+ endpoint, self._session_key, method="DELETE")
if response is not None and response.status in (404, "404"):
if throw:
- raise CredNotFound("Credential stanza not exits - {}".format(name))
+ raise CredNotFound(
+ "Credential stanza not exits - {}".format(name))
elif not response or response.status not in (200, 201, "200", "201"):
if throw:
raise CredException(
- "Failed to delete credential stanza {}".format(name)
- )
+ "Failed to delete credential stanza {}".format(name))
def get_all_passwords(self):
results = {}
@@ -262,21 +215,20 @@ def get_all_passwords(self):
exist_stanza = results.get(actual_name)
else:
exist_stanza = stanza
- exist_stanza["name"] = actual_name
- exist_stanza["username"] = exist_stanza["username"].split(
- self._sep
- )[0]
- exist_stanza["clears"] = {}
- exist_stanza["encrs"] = {}
+ exist_stanza['name'] = actual_name
+ exist_stanza['username'] = \
+ exist_stanza['username'].split(self._sep)[0]
+ exist_stanza['clears'] = {}
+ exist_stanza['encrs'] = {}
try:
- exist_stanza["clears"][index] = stanza.get("clear_password")
- exist_stanza["encrs"][index] = stanza.get("encr_password")
+ exist_stanza['clears'][index] = stanza.get('clear_password')
+ exist_stanza['encrs'][index] = stanza.get('encr_password')
except KeyError:
- exist_stanza["clears"] = {}
- exist_stanza["encrs"] = {}
- exist_stanza["clears"][index] = stanza.get("clear_password")
- exist_stanza["encrs"][index] = stanza.get("encr_password")
+ exist_stanza['clears'] = {}
+ exist_stanza['encrs'] = {}
+ exist_stanza['clears'][index] = stanza.get('clear_password')
+ exist_stanza['encrs'][index] = stanza.get('encr_password')
results[actual_name] = exist_stanza
@@ -285,19 +237,19 @@ def get_all_passwords(self):
# merge the stanzas by index
for name, stanza in list(results.items()):
- field_clear = stanza.get("clears")
- field_encr = stanza.get("encrs")
+ field_clear = stanza.get('clears')
+ field_encr = stanza.get('encrs')
if isinstance(field_clear, dict):
clear_password = ""
encr_password = ""
for index in sorted(field_clear.keys()):
clear_password += field_clear.get(index)
encr_password += field_encr.get(index)
- stanza["clear_password"] = clear_password
- stanza["encr_password"] = encr_password
+ stanza['clear_password'] = clear_password
+ stanza['encr_password'] = encr_password
- del stanza["clears"]
- del stanza["encrs"]
+ del stanza['clears']
+ del stanza['encrs']
return list(results.values())
def _get_all_passwords(self):
@@ -313,8 +265,7 @@ def _get_all_passwords(self):
endpoint = self._get_endpoint()
response, content = rest.splunkd_request(
- endpoint, self._session_key, method="GET"
- )
+ endpoint, self._session_key, method="GET")
if response and response.status in (200, 201, "200", "201") and content:
return xdp.parse_conf_xml_dom(content)
raise CredException("Failed to get credentials")
@@ -351,22 +302,16 @@ def _get_credentials(self, prop, name=None):
values = stanza[prop].split(self._sep)
if len(values) % 2 == 1:
continue
- result = {values[i]: values[i + 1] for i in range(0, len(values), 2)}
+ result = {values[i]: values[i + 1]
+ for i in range(0, len(values), 2)}
results[stanza.get("username")] = result
return results
@staticmethod
def _build_name(realm, name):
return util.format_stanza_name(
- "".join(
- (
- CredentialManager._escape_string(realm),
- ":",
- CredentialManager._escape_string(name),
- ":",
- )
- )
- )
+ "".join((CredentialManager._escape_string(realm), ":",
+ CredentialManager._escape_string(name), ":")))
@staticmethod
def _escape_string(string_to_escape):
@@ -388,10 +333,8 @@ def _get_endpoint(self, name=None, query=False):
if name:
realm_user = self._build_name(self._realm, name)
rest_endpoint = "{}/servicesNS/{}/{}/storage/passwords/{}".format(
- self._splunkd_uri, owner, app, realm_user
- )
+ self._splunkd_uri, owner, app, realm_user)
else:
- rest_endpoint = "{}/servicesNS/{}/{}/storage/passwords?count=-1" "".format(
- self._splunkd_uri, owner, app
- )
+ rest_endpoint = "{}/servicesNS/{}/{}/storage/passwords?count=-1" \
+ "".format(self._splunkd_uri, owner, app)
return rest_endpoint
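
The rewritten delete() above matches chunked credential stanzas by regex instead of prefix slicing. A standalone sketch of that pattern, using placeholder values for the realm, name, and separator (the real self._sep is defined elsewhere in this module):

    import re

    realm, name, sep = "my_realm", "my_user", "``"  # placeholder values
    ent_pattern = re.compile(r"%s:(%s%s\d+):" % (realm, name, sep))

    for stanza_name in ("my_realm:my_user``1:", "my_realm:other``1:"):
        m = ent_pattern.match(stanza_name)
        # group(1) is the chunked credential name, e.g. "my_user``1"
        print(stanza_name, "->", m.group(1) if m else "no match")
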
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/event_writer.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/event_writer.py
new file mode 100755
index 00000000..da3d24ea
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/event_writer.py
@@ -0,0 +1,92 @@
+from future import standard_library
+standard_library.install_aliases()
+from six import string_types
+from builtins import object
+import queue
+import multiprocessing
+import threading
+import sys
+try:
+ from collections.abc import Iterable
+except ImportError: # removed from the collections module in Python 3.10
+ from collections import Iterable
+from .common import log
+
+
+class EventWriter(object):
+
+ def __init__(self, process_safe=False):
+ if process_safe:
+ self._mgr = multiprocessing.Manager()
+ self._event_queue = self._mgr.Queue(1000)
+ else:
+ self._event_queue = queue.Queue(1000)
+ self._event_writer = threading.Thread(target=self._do_write_events)
+ self._event_writer.daemon = True
+ self._started = False
+ self._exception = False
+
+ def start(self):
+ if self._started:
+ return
+ self._started = True
+
+ self._event_writer.start()
+ log.logger.info("Event writer started.")
+
+ def tear_down(self):
+ if not self._started:
+ return
+ self._started = False
+
+ self._event_queue.put(None)
+ self._event_writer.join()
+ log.logger.info("Event writer stopped.")
+
+ def isopen(self):
+ return self._started and (not self._exception)
+
+ def write_events(self, events):
+ if not self.isopen():
+ return False
+ if events is None:
+ return True
+ self._event_queue.put(events)
+ return True
+
+ def _do_write_events(self):
+ event_queue = self._event_queue
+ # The events below are encoded to bytes on Python 3, where
+ # sys.stdout.write() only accepts str, so bind the binary buffer there.
+ if sys.version_info[0] > 2:
+ write = sys.stdout.buffer.write
+ else:
+ write = sys.stdout.write
+ got_shutdown_signal = False
+
+ while 1:
+ try:
+ event = event_queue.get(timeout=3)
+ if event is not None:
+ if isinstance(event, string_types):
+ if sys.version_info[0] > 2:
+ event = event.encode("utf-8")
+ write(event)
+ elif isinstance(event, Iterable):
+ for evt in event:
+ if sys.version_info[0] > 2 and isinstance(evt, string_types):
+ evt = evt.encode("utf-8")
+ write(evt)
+ else:
+ log.logger.info("Event writer got tear down signal")
+ got_shutdown_signal = True
+ except queue.Empty:
+ # We need to drain the queue before shutdown
+ # timeout means empty for now
+ if got_shutdown_signal:
+ log.logger.info("Event writer is going to exit...")
+ break
+ else:
+ continue
+ except Exception:
+ log.logger.exception("EventWriter encounter exception which may"
+ "cause data loss, queue leftsize={"
+ "}".format(
+ event_queue.qsize()))
+ self._exception = True
+ break
+
+ log.logger.info("Event writer stopped, queue leftsize={}".format(
+ event_queue.qsize()))
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/file_monitor.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/file_monitor.py
old mode 100644
new mode 100755
similarity index 54%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/file_monitor.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/file_monitor.py
index ed04c14b..093fdad5
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/file_monitor.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/file_monitor.py
@@ -1,42 +1,23 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
+from builtins import object
import os.path as op
import traceback
-import warnings
-from splunktalib.common import log
+from .common import log
+
+class FileMonitor(object):
-class FileMonitor:
def __init__(self, callback, files):
"""
:files: files to be monitored with full path
"""
- warnings.warn(
- "This class is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
self._callback = callback
self._files = files
- self.file_mtimes = {file_name: None for file_name in self._files}
+ self.file_mtimes = {
+ file_name: None for file_name in self._files
+ }
for k in self.file_mtimes:
if not op.exists(k):
continue
@@ -46,9 +27,8 @@ def __init__(self, callback, files):
continue
self.file_mtimes[k] = op.getmtime(k)
except OSError:
- log.logger.error(
- "Getmtime for %s, failed: %s", k, traceback.format_exc()
- )
+ log.logger.error("Getmtime for %s, failed: %s",
+ k, traceback.format_exc())
def __call__(self):
return self.check_changes()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/kv_client.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/kv_client.py
old mode 100644
new mode 100755
similarity index 71%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/kv_client.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/kv_client.py
index 8eb20228..40d24a3a
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/kv_client.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/kv_client.py
@@ -1,26 +1,10 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import json
+from builtins import range
+from builtins import object
import re
-import warnings
-
-from defusedxml import ElementTree as et
+import json
+from xml.etree import cElementTree as et
-import splunktalib.rest as rest
+from . import rest as rest
class KVException(Exception):
@@ -35,14 +19,9 @@ class KVNotExists(KVException):
pass
-class KVClient:
+class KVClient(object):
+
def __init__(self, splunkd_host, session_key):
- warnings.warn(
- "This class is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
self._splunkd_host = splunkd_host
self._session_key = session_key
@@ -56,7 +35,9 @@ def create_collection(self, collection, app, owner="nobody"):
assert app
uri = self._get_config_endpoint(app, owner)
- data = {"name": collection}
+ data = {
+ "name": collection
+ }
self._do_request(uri, "POST", data)
def list_collection(self, collection=None, app=None, owner="nobody"):
@@ -75,7 +56,7 @@ def list_collection(self, collection=None, app=None, owner="nobody"):
path = "./entry/title"
if m:
ns = m.group(1)
- path = "./{{{}}}entry/{{{}}}title".format(ns, ns)
+ path = "./{%s}entry/{%s}title" % (ns, ns)
collections = et.fromstring(content)
return [node.text for node in collections.iterfind(path)]
@@ -107,7 +88,8 @@ def insert_collection_data(self, collection, data, app, owner="nobody"):
assert app
uri = self._get_data_endpoint(app, owner, collection)
- key = self._do_request(uri, "POST", data, content_type="application/json")
+ key = self._do_request(uri, "POST", data,
+ content_type="application/json")
return json.loads(key)
def delete_collection_data(self, collection, key_id, app, owner="nobody"):
@@ -123,7 +105,8 @@ def delete_collection_data(self, collection, key_id, app, owner="nobody"):
uri = self._get_data_endpoint(app, owner, collection, key_id)
self._do_request(uri, "DELETE", content_type="application/json")
- def update_collection_data(self, collection, key_id, data, app, owner="nobody"):
+ def update_collection_data(self, collection, key_id, data,
+ app, owner="nobody"):
"""
:collection: collection name
:key_id: key id returned at creation
@@ -135,7 +118,8 @@ def update_collection_data(self, collection, key_id, data, app, owner="nobody"):
assert app
uri = self._get_data_endpoint(app, owner, collection, key_id)
- k = self._do_request(uri, "POST", data, content_type="application/json")
+ k = self._do_request(uri, "POST", data,
+ content_type="application/json")
return json.loads(k)
def get_collection_data(self, collection, key_id, app, owner="nobody"):
@@ -154,27 +138,24 @@ def get_collection_data(self, collection, key_id, app, owner="nobody"):
k = self._do_request(uri, "GET")
return json.loads(k)
- def _do_request(
- self, uri, method, data=None, content_type="application/x-www-form-urlencoded"
- ):
+ def _do_request(self, uri, method, data=None,
+ content_type="application/x-www-form-urlencoded"):
headers = {"Content-Type": content_type}
- resp, content = rest.splunkd_request(
- uri, self._session_key, method, headers, data
- )
+ resp, content = rest.splunkd_request(uri, self._session_key,
+ method, headers, data)
if resp is None and content is None:
- raise KVException("Failed uri={}, data={}".format(uri, data))
+ raise KVException("Failed uri={0}, data={1}".format(uri, data))
if resp.status in (200, 201):
return content
elif resp.status == 409:
- raise KVAlreadyExists("{}-{} already exists".format(uri, data))
+ raise KVAlreadyExists("{0}-{1} already exists".format(uri, data))
elif resp.status == 404:
- raise KVNotExists("{}-{} not exists".format(uri, data))
+ raise KVNotExists("{0}-{1} not exists".format(uri, data))
else:
- raise KVException(
- "Failed to {} {}, reason={}".format(method, uri, resp.reason)
- )
+ raise KVException("Failed to {0} {1}, reason={2}".format(
+ method, uri, resp.reason))
def _get_config_endpoint(self, app, owner, collection=None):
uri = "{0}/servicesNS/{1}/{2}/storage/collections/config"
@@ -194,19 +175,13 @@ def _do_get_endpoint(self, app, owner, collection, key_id, uri_template):
uri = uri_template.format(self._splunkd_host, owner, app)
if collection is not None:
- uri += "/{}".format(collection)
+ uri += "/{0}".format(collection)
if key_id is not None:
- uri += "/{}".format(key_id)
+ uri += "/{0}".format(key_id)
return uri
def create_collection(kv_client, collection, appname):
- warnings.warn(
- "This function is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
not_exists = False
try:
res = kv_client.list_collection(collection, appname)
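
A sketch of how this vendored KVClient is typically driven (assumptions: a reachable local splunkd, a valid session key, and the add-on's app name; all three values below are placeholders):

    from cloudconnectlib.splunktalib.kv_client import KVClient, create_collection

    client = KVClient("https://localhost:8089", "<session-key>")
    # create_collection() only creates the collection when it is missing.
    create_collection(client, "talib_states", "Splunk_TA_paloalto")
    print(client.list_collection(app="Splunk_TA_paloalto"))
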
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/modinput.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/modinput.py
old mode 100644
new mode 100755
similarity index 79%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/modinput.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/modinput.py
index 04bfd1d3..1ec1cb41
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/modinput.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/modinput.py
@@ -1,25 +1,9 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import subprocess
import sys
+import subprocess
import traceback
-import splunktalib.splunk_platform as sp
-from splunktalib.common import log
+from . import splunk_platform as sp
+from .common import log
def _parse_modinput_configs(root, outer_block, inner_block):
@@ -64,7 +48,9 @@ def _parse_modinput_configs(root, outer_block, inner_block):
confs = root.getElementsByTagName(outer_block)
if not confs:
log.logger.error("Invalid config, missing %s section", outer_block)
- raise Exception("Invalid config, missing %s section".format(outer_block))
+ raise Exception("Invalid config, missing %s section".format(
+ outer_block
+ ))
configs = []
stanzas = confs[0].getElementsByTagName(inner_block)
@@ -79,11 +65,8 @@ def _parse_modinput_configs(root, outer_block, inner_block):
params = stanza.getElementsByTagName("param")
for param in params:
name = param.getAttribute("name")
- if (
- name
- and param.firstChild
- and param.firstChild.nodeType == param.firstChild.TEXT_NODE
- ):
+ if (name and param.firstChild and
+ param.firstChild.nodeType == param.firstChild.TEXT_NODE):
config[name] = param.firstChild.data
configs.append(config)
return configs
@@ -95,7 +78,7 @@ def parse_modinput_configs(config_str):
@return: meta_config and stanza_config
"""
- import defusedxml.minidom as xdm
+ import xml.dom.minidom as xdm
meta_configs = {
"server_host": None,
@@ -111,7 +94,8 @@ def parse_modinput_configs(config_str):
log.logger.error("Invalid config, missing %s section", tag)
raise Exception("Invalid config, missing %s section", tag)
- if nodes[0].firstChild and nodes[0].firstChild.nodeType == nodes[0].TEXT_NODE:
+ if (nodes[0].firstChild and
+ nodes[0].firstChild.nodeType == nodes[0].TEXT_NODE):
meta_configs[tag] = nodes[0].firstChild.data
else:
log.logger.error("Invalid config, expect text ndoe")
@@ -137,9 +121,8 @@ def get_modinput_configs_from_cli(modinput, modinput_stanza=None):
if modinput_stanza:
cli.append(modinput_stanza)
- out, err = subprocess.Popen(
- cli, stdout=subprocess.PIPE, stderr=subprocess.PIPE
- ).communicate()
+ out, err = subprocess.Popen(cli, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE).communicate()
if err:
log.logger.error("Failed to get modinput configs with error: %s", err)
return None, None
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/orphan_process_monitor.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/orphan_process_monitor.py
old mode 100644
new mode 100755
similarity index 52%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/orphan_process_monitor.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/orphan_process_monitor.py
index fe93f7d8..e74e7e99
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/orphan_process_monitor.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/orphan_process_monitor.py
@@ -1,29 +1,14 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
+from builtins import object
import os
import threading
import time
import traceback
-import warnings
-from splunktalib.common import log
+from ..splunktalib.common import log
+
+class OrphanProcessChecker(object):
-class OrphanProcessChecker:
def __init__(self, callback=None):
"""
Only work for Linux platform. On Windows platform, is_orphan is always
@@ -51,16 +36,9 @@ def check_orphan(self):
return res
-class OrphanProcessMonitor:
+class OrphanProcessMonitor(object):
+
def __init__(self, callback):
- warnings.warn(
- "splunktalib's OrphanProcessMonitor is going to be deprecated and "
- "removed. Please switch to solnlib's "
- "(https://github.com/splunk/addonfactory-solutions-library-python) "
- "version of OrphanProcessMonitor located in orphan_process_monitor.py.",
- DeprecationWarning,
- stacklevel=2,
- )
self._checker = OrphanProcessChecker(callback)
self._thr = threading.Thread(target=self._do_monitor)
self._thr.daemon = True
@@ -84,7 +62,5 @@ def _do_monitor(self):
break
time.sleep(1)
except Exception:
- log.logger.error(
- "Failed to monitor orphan process, reason=%s",
- traceback.format_exc(),
- )
+ log.logger.error("Failed to monitor orphan process, reason=%s",
+ traceback.format_exc())
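
A sketch of the checker's polling style, per the class docstring (Linux only; the work loop is a placeholder; assumes check_orphan() returns a truthy value once the parent exits):

    import time

    from cloudconnectlib.splunktalib.orphan_process_monitor import (
        OrphanProcessChecker,
    )

    def on_orphan():
        print("parent splunkd exited, cleaning up")

    checker = OrphanProcessChecker(callback=on_orphan)
    while not checker.check_orphan():
        time.sleep(1)  # one unit of scheduled work per iteration goes here
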
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/rest.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/rest.py
old mode 100644
new mode 100755
similarity index 58%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/rest.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/rest.py
index 4a3515ae..1c5dc5d9
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/rest.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/rest.py
@@ -1,40 +1,25 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
+from future import standard_library
+standard_library.install_aliases()
+from builtins import range
+import urllib.request, urllib.parse, urllib.error
import json
-import urllib.error
-import urllib.parse
-import urllib.request
from traceback import format_exc
-from httplib2 import Http, ProxyInfo, socks
+from .common import util as scu
+from .common import log as log
+
-import splunktalib.common.log as log
-import splunktalib.common.util as scu
+from httplib2 import (socks, ProxyInfo, Http)
-def splunkd_request(
- splunkd_uri, session_key, method="GET", headers=None, data=None, timeout=30, retry=1
-):
+def splunkd_request(splunkd_uri, session_key, method="GET",
+ headers=None, data=None, timeout=30, retry=1):
"""
:return: httplib2.Response and content
"""
headers = headers if headers is not None else {}
- headers["Authorization"] = "Splunk {}".format(session_key)
+ headers["Authorization"] = "Splunk {0}".format(session_key)
content_type = headers.get("Content-Type")
if not content_type:
content_type = headers.get("content-type")
@@ -54,17 +39,17 @@ def splunkd_request(
resp, content = None, None
for _ in range(retry):
try:
- resp, content = http.request(
- splunkd_uri, method=method, headers=headers, body=data
- )
+ resp, content = http.request(splunkd_uri, method=method,
+ headers=headers, body=data)
+ if content:
+ content = content.decode()
except Exception:
log.logger.error(msg_temp, splunkd_uri, "unknown", format_exc())
else:
if resp.status not in (200, 201):
if not (method == "GET" and resp.status == 404):
- log.logger.debug(
- msg_temp, splunkd_uri, resp.status, code_to_msg(resp, content)
- )
+ log.logger.debug(msg_temp, splunkd_uri, resp.status,
+ code_to_msg(resp, content))
else:
return resp, content
else:
@@ -80,10 +65,8 @@ def code_to_msg(resp, content):
404: "Requested endpoint does not exist.",
409: "Invalid operation for this endpoint. reason={}".format(content),
500: "Unspecified internal server error. reason={}".format(content),
- 503: (
- "Feature is disabled in the configuration file. "
- "reason={}".format(content)
- ),
+ 503: ("Feature is disabled in the configuration file. "
+ "reason={}".format(content)),
}
return code_msg_tbl.get(resp.status, content)
@@ -121,31 +104,23 @@ def build_http_connection(config, timeout=120, disable_ssl_validation=False):
proxy_info = None
if config.get("proxy_url") and config.get("proxy_port"):
if config.get("proxy_username") and config.get("proxy_password"):
- proxy_info = ProxyInfo(
- proxy_type=proxy_type,
- proxy_host=config["proxy_url"],
- proxy_port=int(config["proxy_port"]),
- proxy_user=config["proxy_username"],
- proxy_pass=config["proxy_password"],
- proxy_rdns=rdns,
- )
+ proxy_info = ProxyInfo(proxy_type=proxy_type,
+ proxy_host=config["proxy_url"],
+ proxy_port=int(config["proxy_port"]),
+ proxy_user=config["proxy_username"],
+ proxy_pass=config["proxy_password"],
+ proxy_rdns=rdns)
else:
- proxy_info = ProxyInfo(
- proxy_type=proxy_type,
- proxy_host=config["proxy_url"],
- proxy_port=int(config["proxy_port"]),
- proxy_rdns=rdns,
- )
+ proxy_info = ProxyInfo(proxy_type=proxy_type,
+ proxy_host=config["proxy_url"],
+ proxy_port=int(config["proxy_port"]),
+ proxy_rdns=rdns)
if proxy_info:
- http = Http(
- proxy_info=proxy_info,
- timeout=timeout,
- disable_ssl_certificate_validation=disable_ssl_validation,
- )
+ http = Http(proxy_info=proxy_info, timeout=timeout,
+ disable_ssl_certificate_validation=disable_ssl_validation)
else:
- http = Http(
- timeout=timeout, disable_ssl_certificate_validation=disable_ssl_validation
- )
+ http = Http(timeout=timeout,
+ disable_ssl_certificate_validation=disable_ssl_validation)
if config.get("username") and config.get("password"):
http.add_credentials(config["username"], config["password"])
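
A sketch of the two entry points in this module (placeholder URI and session key; splunkd_request() returns an (httplib2.Response, content) pair, with content decoded to str by the change above):

    from cloudconnectlib.splunktalib import rest

    resp, content = rest.splunkd_request(
        "https://localhost:8089/services/server/info",
        "<session-key>", method="GET", retry=3)
    if resp and resp.status in (200, 201):
        print(content)

    # An empty config selects a plain connection with no proxy (assumption:
    # only the proxy_*/username/password keys are consulted).
    http = rest.build_http_connection({}, timeout=60)
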
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/schedule/__init__.py
old mode 100644
new mode 100755
similarity index 100%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/__init__.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/schedule/__init__.py
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/schedule/job.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/schedule/job.py
old mode 100644
new mode 100755
similarity index 72%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/schedule/job.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/schedule/job.py
index de7e939a..a5bb76dd
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/schedule/job.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/schedule/job.py
@@ -1,24 +1,9 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
+from builtins import object
import threading
import time
-class Job:
+class Job(object):
"""
Timer wraps the callback and timestamp related stuff
"""
@@ -96,23 +81,23 @@ def __cmp__(self, other):
def __eq__(self, other):
return isinstance(other, Job) and (self.ident() == other.ident())
- def __lt__(self, other):
- return self.__cmp__(other) == -1
-
- def __gt__(self, other):
- return self.__cmp__(other) == 1
+ def __hash__(self):
+ return hash(self.ident())
def __ne__(self, other):
- return not self.__eq__(other)
+ return self.__cmp__(other) != 0
- def __le__(self, other):
- return self.__lt__(other) or self.__eq__(other)
+ def __gt__(self, other):
+ return self.__cmp__(other) > 0
+
+ def __lt__(self, other):
+ return self.__cmp__(other) < 0
def __ge__(self, other):
- return self.__gt__(other) or self.__eq__(other)
+ return self.__cmp__(other) >= 0
- def __hash__(self):
- return self.ident()
+ def __le__(self, other):
+ return self.__cmp__(other) <= 0
def __call__(self):
self._func(self)
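
The comparison rewrite above derives every rich comparison from __cmp__ and hashes ident() instead of returning it directly, which keeps __hash__ valid for non-integer idents (Python 3 also drops the implicit __hash__ once __eq__ is defined). The same pattern in miniature:

    class Ordered(object):
        def __init__(self, ident):
            self._ident = ident

        def ident(self):
            return self._ident

        def __cmp__(self, other):  # returns -1, 0 or 1, as in Job.__cmp__
            return (self.ident() > other.ident()) - (self.ident() < other.ident())

        def __eq__(self, other):
            return isinstance(other, Ordered) and self.ident() == other.ident()

        def __hash__(self):
            return hash(self.ident())

        def __lt__(self, other):
            return self.__cmp__(other) < 0

    print(sorted([Ordered(3), Ordered(1)])[0].ident())  # 1
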
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/schedule/scheduler.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/schedule/scheduler.py
old mode 100644
new mode 100755
similarity index 79%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/schedule/scheduler.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/schedule/scheduler.py
index a03a7cc0..dae26728
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/schedule/scheduler.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/schedule/scheduler.py
@@ -1,28 +1,14 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import queue
-import random
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
import threading
from time import time
-
-from splunktalib.common import log
+import random
+import queue
+from ..common import log
-class Scheduler:
+class Scheduler(object):
"""
A simple scheduler which schedules the periodic or once event
"""
@@ -96,7 +82,7 @@ def get_ready_jobs(self):
ready_jobs.append(job)
if ready_jobs:
- del job_set[: len(ready_jobs)]
+ del job_set[:len(ready_jobs)]
for job in ready_jobs:
if job.get_interval() != 0 and not job.stopped():
@@ -107,17 +93,14 @@ def get_ready_jobs(self):
if job_set:
sleep_time = job_set[0].get_expiration() - now
if sleep_time < 0:
- log.logger.warn("Scheduler satuation, sleep_time=%s", sleep_time)
+ log.logger.warn("Scheduler satuation, sleep_time=%s",
+ sleep_time)
sleep_time = 0.1
if ready_jobs:
- log.logger.info(
- "Get %d ready jobs, next duration is %f, "
- "and there are %s jobs scheduling",
- len(ready_jobs),
- sleep_time,
- total_jobs,
- )
+ log.logger.info("Get %d ready jobs, next duration is %f, "
+ "and there are %s jobs scheduling",
+ len(ready_jobs), sleep_time, total_jobs)
ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True)
return (sleep_time, ready_jobs)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/setting.conf b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/setting.conf
new file mode 100755
index 00000000..f7973ad8
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/setting.conf
@@ -0,0 +1,5 @@
+[global]
+process_size = 0
+thread_min_size = 4
+thread_max_size = 128
+task_queue_size = 1024
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/splunk_cluster.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/splunk_cluster.py
new file mode 100755
index 00000000..9adaf4f9
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/splunk_cluster.py
@@ -0,0 +1,54 @@
+from builtins import object
+from ..splunktalib import rest
+from ..splunktalib.common import xml_dom_parser as xdp
+
+
+def _do_rest(uri, session_key):
+ resp, content = rest.splunkd_request(uri, session_key)
+ if resp is None:
+ return None
+
+ if resp.status not in (200, 201):
+ return None
+
+ stanza_objs = xdp.parse_conf_xml_dom(content)
+ if not stanza_objs:
+ return None
+
+ return stanza_objs[0]
+
+
+class ServerInfo(object):
+
+ def __init__(self, splunkd_uri, session_key):
+ uri = "{}/services/server/info".format(splunkd_uri)
+ server_info = _do_rest(uri, session_key)
+ if server_info is None:
+ raise Exception("Failed to init ServerInfo")
+
+ self._server_info = server_info
+
+ def is_captain(self):
+ """
+ :return: True if splunkd_uri is the SHC captain, otherwise False
+ """
+
+ return "shc_captain" in self._server_info["server_roles"]
+
+ def is_search_head(self):
+ for sh in ("search_head", "cluster_search_head"):
+ if sh in self._server_info["server_roles"]:
+ return True
+ return False
+
+ def is_shc_member(self):
+ server_roles = self._server_info['server_roles']
+ return any(
+ role in server_roles for role in ('shc_member', 'shc_captain')
+ )
+
+ def version(self):
+ return self._server_info["version"]
+
+ def to_dict(self):
+ return self._server_info
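
Usage sketch for ServerInfo (placeholder URI and session key; assumes a reachable splunkd, since the constructor raises when the REST call fails):

    from cloudconnectlib.splunktalib.splunk_cluster import ServerInfo

    info = ServerInfo("https://localhost:8089", "<session-key>")
    if info.is_search_head() and not info.is_captain():
        print("SHC member, not the captain, version", info.version())
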
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/splunk_platform.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/splunk_platform.py
new file mode 100755
index 00000000..cc35d8ba
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/splunk_platform.py
@@ -0,0 +1,112 @@
+from future import standard_library
+standard_library.install_aliases()
+import os
+import os.path as op
+import subprocess
+from configparser import ConfigParser
+from io import StringIO
+
+from .common import util as scu
+
+import sys
+PY_VERSION = (sys.version_info[0], sys.version_info[1])
+
+def make_splunkhome_path(parts):
+ """
+ create a path string by joining the given parts under SPLUNK_HOME
+ """
+
+ relpath = os.path.normpath(os.path.join(*parts))
+
+ basepath = os.environ["SPLUNK_HOME"] # Assume SPLUNK_HOME env has been set
+
+ fullpath = os.path.normpath(os.path.join(basepath, relpath))
+
+ # Check that we haven't escaped from intended parent directories.
+ if os.path.relpath(fullpath, basepath)[0:2] == '..':
+ raise ValueError('Illegal escape from parent directory "%s": %s' %
+ (basepath, fullpath))
+
+ return fullpath
+
+
+def get_splunk_bin():
+ if os.name == "nt":
+ splunk_bin = "splunk.exe"
+ else:
+ splunk_bin = "splunk"
+ return make_splunkhome_path(("bin", splunk_bin))
+
+
+def get_appname_from_path(absolute_path):
+ return scu.get_appname_from_path(absolute_path)
+
+
+def _get_merged_conf_raw(conf_name):
+ """
+ :conf_name: configuration file name
+ :return: raw output of all contents for the same conf file
+ Note: it depends on SPLUNK_HOME env variable
+ """
+
+ assert conf_name
+
+ if conf_name.endswith(".conf"):
+ conf_name = conf_name[:-5]
+
+ # FIXME dynamically calculate SPLUNK_HOME
+ btool_cli = [op.join(os.environ["SPLUNK_HOME"], "bin", "btool"),
+ conf_name, "list"]
+
+ try:
+ p = subprocess.Popen(btool_cli, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ except OSError:
+ raise
+
+ return out
+
+
+def _get_conf_stanzas(conf_name):
+ """
+ :return: {stanza_name: stanza_configs}, dict of dict
+ """
+
+ res = _get_merged_conf_raw(conf_name)
+ res = res.decode('utf-8')
+ res = StringIO(res)
+ parser = ConfigParser()
+ parser.optionxform = str
+ if PY_VERSION >= (3, 2):
+ parser.read_file(res)
+ else:
+ parser.readfp(res)
+ res = {}
+ for section in parser.sections():
+ res[section] = {item[0]: item[1] for item in parser.items(section)}
+ return res
+
+
+def get_splunkd_uri():
+ if "SPLUNKD_URI" in os.environ:
+ return os.environ["SPLUNKD_URI"]
+ else:
+ server_conf = _get_conf_stanzas("server")
+ if server_conf["sslConfig"]["enableSplunkdSSL"].lower() == "true":
+ http = "https://"
+ else:
+ http = "http://"
+
+ web_conf = _get_conf_stanzas("web")
+ host_port = web_conf["settings"]["mgmtHostPort"]
+ splunkd_uri = "{}{}".format(http, host_port)
+
+ if os.environ.get("SPLUNK_BINDIP"):
+ bip = os.environ["SPLUNK_BINDIP"]
+ port_idx = bip.rfind(":")
+ if port_idx > 0:
+ bip = bip[:port_idx]
+ port = host_port[host_port.rfind(":"):]
+ splunkd_uri = "{}{}{}".format(http, bip, port)
+ return splunkd_uri
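
A sketch of the SPLUNK_HOME helpers above ("/opt/splunk" is a placeholder; the module assumes SPLUNK_HOME is set, as its own comments note):

    import os

    os.environ.setdefault("SPLUNK_HOME", "/opt/splunk")

    from cloudconnectlib.splunktalib.splunk_platform import make_splunkhome_path

    print(make_splunkhome_path(("etc", "apps", "Splunk_TA_paloalto")))
    # -> /opt/splunk/etc/apps/Splunk_TA_paloalto

    try:
        make_splunkhome_path(("..", "outside"))  # escapes are rejected
    except ValueError as err:
        print(err)
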
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/state_store.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/state_store.py
new file mode 100755
index 00000000..f8811f65
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/state_store.py
@@ -0,0 +1,255 @@
+from builtins import object
+import json
+import os
+import os.path as op
+import time
+import traceback
+from abc import abstractmethod
+
+from ..splunktacollectorlib.common import log as stulog
+from ..splunktalib import kv_client as kvc
+from ..splunktalib.common import util
+
+
+def get_state_store(meta_configs,
+ appname,
+ collection_name="talib_states",
+ use_kv_store=False,
+ use_cache_file=True,
+ max_cache_seconds=5):
+ if util.is_true(use_kv_store):
+ # KV store based checkpoint
+ return StateStore(appname, meta_configs['server_uri'],
+ meta_configs['session_key'], collection_name)
+ checkpoint_dir = meta_configs['checkpoint_dir']
+ if util.is_true(use_cache_file):
+ return CachedFileStateStore(appname, checkpoint_dir, max_cache_seconds)
+ return FileStateStore(appname, checkpoint_dir)
+
+
+class BaseStateStore(object):
+ def __init__(self, app_name):
+ self._app_name = app_name
+
+ @abstractmethod
+ def update_state(self, key, states):
+ pass
+
+ @abstractmethod
+ def get_state(self, key):
+ pass
+
+ @abstractmethod
+ def delete_state(self, key):
+ pass
+
+ def close(self, key=None):
+ pass
+
+
+class StateStore(BaseStateStore):
+ def __init__(self, app_name, server_uri, session_key, collection_name="talib_states"):
+ """
+ :meta_configs: dict like and contains checkpoint_dir, session_key,
+ server_uri etc
+ :app_name: the name of the app
+ :collection_name: the collection name to be used.
+ Don"t use other method to visit the collection if you are using
+ StateStore to visit it.
+ """
+ super(StateStore, self).__init__(app_name)
+
+ # State cache is a dict from _key to value
+ self._states_cache = {}
+ self._kv_client = None
+ self._collection = collection_name
+ self._kv_client = kvc.KVClient(
+ splunkd_host=server_uri,
+ session_key=session_key
+ )
+ kvc.create_collection(self._kv_client, self._collection, self._app_name)
+ self._load_states_cache()
+
+ def update_state(self, key, states):
+ """
+ :state: Any JSON serializable
+ :return: None if successful, otherwise throws exception
+ """
+
+ data = {'value': json.dumps(states)}
+
+ if key not in self._states_cache:
+ data['_key'] = key
+ self._kv_client.insert_collection_data(
+ collection=self._collection, data=data, app=self._app_name
+ )
+ else:
+ self._kv_client.update_collection_data(
+ collection=self._collection, key_id=key, data=data, app=self._app_name
+ )
+ self._states_cache[key] = states
+
+ def get_state(self, key=None):
+ if key:
+ return self._states_cache.get(key, None)
+ return self._states_cache
+
+ def delete_state(self, key=None):
+ if key:
+ self._delete_state(key)
+ else:
+ for key in list(self._states_cache.keys()):
+ self._delete_state(key)
+
+ def _delete_state(self, key):
+ if key not in self._states_cache:
+ return
+
+ self._kv_client.delete_collection_data(
+ self._collection, key, self._app_name)
+ del self._states_cache[key]
+
+ def _load_states_cache(self):
+ states = self._kv_client.get_collection_data(
+ self._collection, None, self._app_name)
+ if not states:
+ return
+
+ for state in states:
+ value = state['value'] if 'value' in state else state
+ key = state['_key']
+ try:
+ value = json.loads(value)
+ except Exception:
+ stulog.logger.warning(
+ 'Unable to load state from cache, key=%s, error=%s',
+ key, traceback.format_exc())
+ pass
+
+ self._states_cache[key] = value
+
+
+def _create_checkpoint_dir_if_needed(checkpoint_dir):
+ if os.path.isdir(checkpoint_dir):
+ return
+
+ stulog.logger.info(
+ "Checkpoint dir '%s' doesn't exist, try to create it",
+ checkpoint_dir)
+ try:
+ os.mkdir(checkpoint_dir)
+ except OSError:
+ stulog.logger.exception(
+ "Failure creating checkpoint dir '%s'", checkpoint_dir
+ )
+ raise Exception(
+ "Unable to create checkpoint dir '{}'".format(checkpoint_dir)
+ )
+
+
+class FileStateStore(BaseStateStore):
+ def __init__(self, app_name, checkpoint_dir):
+ super(FileStateStore, self).__init__(app_name)
+ self._checkpoint_dir = checkpoint_dir
+
+ def _get_checkpoint_file(self, filename):
+ return op.join(self._checkpoint_dir, filename)
+
+ @staticmethod
+ def _remove_if_exist(filename):
+ if op.exists(filename):
+ os.remove(filename)
+
+ def update_state(self, key, states):
+ """
+ :state: Any JSON serializable
+ :return: None if successful, otherwise throws exception
+ """
+
+ _create_checkpoint_dir_if_needed(self._checkpoint_dir)
+
+ filename = self._get_checkpoint_file(key)
+ with open(filename + ".new", "w") as json_file:
+ json.dump(states, json_file)
+
+ self._remove_if_exist(filename)
+
+ os.rename(filename + ".new", filename)
+
+ def get_state(self, key):
+ filename = self._get_checkpoint_file(key)
+ if op.exists(filename):
+ with open(filename) as json_file:
+ state = json.load(json_file)
+ return state
+ else:
+ return None
+
+ def delete_state(self, key):
+ self._remove_if_exist(self._get_checkpoint_file(key))
+
+
+class CachedFileStateStore(FileStateStore):
+ def __init__(self, app_name, checkpoint_dir, max_cache_seconds=5):
+ """
+ :meta_configs: dict like and contains checkpoint_dir, session_key,
+ server_uri etc
+ """
+
+ super(CachedFileStateStore, self).__init__(app_name, checkpoint_dir)
+ self._states_cache = {} # item: time, dict
+ self._states_cache_lmd = {} # item: time, dict
+ self.max_cache_seconds = max_cache_seconds
+
+ def update_state(self, key, states):
+ now = time.time()
+ if key in self._states_cache:
+ last = self._states_cache_lmd[key][0]
+ if now - last >= self.max_cache_seconds:
+ self._update_and_flush_state(now, key, states)
+ else:
+ self._update_and_flush_state(now, key, states)
+ self._states_cache[key] = (now, states)
+
+ def _update_and_flush_state(self, now, key, states):
+ """
+ :state: Any JSON serializable
+ :return: None if successful, otherwise throws exception
+ """
+ self._states_cache_lmd[key] = (now, states)
+ super(CachedFileStateStore, self).update_state(key, states)
+
+ def get_state(self, key):
+ if key in self._states_cache:
+ return self._states_cache[key][1]
+
+ filename = self._get_checkpoint_file(key)
+
+ if op.exists(filename):
+ with open(filename) as json_file:
+ state = json.load(json_file)
+ now = time.time()
+ self._states_cache[key] = now, state
+ self._states_cache_lmd[key] = now, state
+ return state
+ else:
+ return None
+
+ def delete_state(self, key):
+ super(CachedFileStateStore, self).delete_state(key)
+
+ if self._states_cache.get(key):
+ del self._states_cache[key]
+ if self._states_cache_lmd.get(key):
+ del self._states_cache_lmd[key]
+
+ def close(self, key=None):
+ if not key:
+ for k, (t, s) in self._states_cache.items():
+ self._update_and_flush_state(t, k, s)
+ self._states_cache.clear()
+ self._states_cache_lmd.clear()
+ elif key in self._states_cache:
+ self._update_and_flush_state(self._states_cache[key][0], key,
+ self._states_cache[key][1])
+ del self._states_cache[key]
+ del self._states_cache_lmd[key]
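
File-based checkpointing with the factory above, as a sketch (the checkpoint dir and state payload are placeholders; use_kv_store/use_cache_file select the backend):

    from cloudconnectlib.splunktalib.state_store import get_state_store

    store = get_state_store(
        {"checkpoint_dir": "/tmp/ckpt"}, appname="Splunk_TA_paloalto",
        use_kv_store=False, use_cache_file=False)  # plain FileStateStore
    store.update_state("input_one", {"last_seen": 1700000000})
    print(store.get_state("input_one"))  # {'last_seen': 1700000000}
    store.delete_state("input_one")
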
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/timer.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/timer.py
old mode 100644
new mode 100755
similarity index 58%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/timer.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/timer.py
index 84fbf899..7d91617a
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/timer.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/timer.py
@@ -1,24 +1,8 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
+from builtins import object
import threading
-import warnings
-class Timer:
+class Timer(object):
"""
Timer wraps the callback and timestamp related stuff
"""
@@ -27,12 +11,6 @@ class Timer:
_lock = threading.Lock()
def __init__(self, callback, when, interval, ident=None):
- warnings.warn(
- "This class is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
self._callback = callback
self._when = when
self._interval = interval
@@ -76,23 +54,23 @@ def __cmp__(self, other):
def __eq__(self, other):
return isinstance(other, Timer) and (self.ident() == other.ident())
- def __lt__(self, other):
- return self.__cmp__(other) == -1
-
- def __gt__(self, other):
- return self.__cmp__(other) == 1
+ def __hash__(self):
+ return hash(self.ident())
def __ne__(self, other):
- return not self.__eq__(other)
+ return self.__cmp__(other) != 0
- def __le__(self, other):
- return self.__lt__(other) or self.__eq__(other)
+ def __gt__(self, other):
+ return self.__cmp__(other) > 0
+
+ def __lt__(self, other):
+ return self.__cmp__(other) < 0
def __ge__(self, other):
- return self.__gt__(other) or self.__eq__(other)
+ return self.__cmp__(other) >= 0
- def __hash__(self):
- return self.ident()
+ def __le__(self, other):
+ return self.__cmp__(other) <= 0
def __call__(self):
self._callback()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/timer_queue.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/timer_queue.py
old mode 100644
new mode 100755
similarity index 76%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/timer_queue.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/timer_queue.py
index 4eba00d1..de83b7a0
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunktalib/timer_queue.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/timer_queue.py
@@ -1,34 +1,20 @@
-#
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
"""
A timer queue implementation
"""
-import queue
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
import threading
-import traceback
-import warnings
+import queue
from time import time
+import traceback
-from splunktalib.common import log
-from splunktalib.timer import Timer
+from .timer import Timer
+from .common import log
-class TimerQueue:
+class TimerQueue(object):
"""
A timer queue implementation, runs a separate thread to handle timers
"""
@@ -36,12 +22,6 @@ class TimerQueue:
import sortedcontainers as sc
def __init__(self):
- warnings.warn(
- "This class is deprecated. "
- "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
- DeprecationWarning,
- stacklevel=2,
- )
self._timers = TimerQueue.sc.SortedSet()
self._cancelling_timers = {}
self._lock = threading.Lock()
@@ -88,10 +68,8 @@ def remove_timer(self, timer):
try:
self._timers.remove(timer)
except ValueError:
- log.logger.info(
- "Timer=%s is not in queue, move it to cancelling " "list",
- timer.ident(),
- )
+ log.logger.info("Timer=%s is not in queue, move it to cancelling "
+ "list", timer.ident())
else:
self._cancelling_timers[timer.ident()] = timer
@@ -135,7 +113,7 @@ def _get_expired_timers(self):
expired_timers.append(timer)
if expired_timers:
- del self._timers[: len(expired_timers)]
+ del self._timers[:len(expired_timers)]
if self._timers:
next_expired_time = self._timers[0].get_expiration()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/configparser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/configparser.py
new file mode 100755
index 00000000..d8dbe931
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/configparser.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""Convenience module importing everything from backports.configparser."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from backports.configparser import (
+ RawConfigParser,
+ ConfigParser,
+ SafeConfigParser,
+ SectionProxy,
+
+ Interpolation,
+ BasicInterpolation,
+ ExtendedInterpolation,
+ LegacyInterpolation,
+
+ NoSectionError,
+ DuplicateSectionError,
+ DuplicateOptionError,
+ NoOptionError,
+ InterpolationError,
+ InterpolationMissingOptionError,
+ InterpolationSyntaxError,
+ InterpolationDepthError,
+ ParsingError,
+ MissingSectionHeaderError,
+ ConverterMapping,
+
+ DEFAULTSECT,
+ MAX_INTERPOLATION_DEPTH,
+)
+
+from backports.configparser import ( # noqa: F401
+ Error,
+ _UNSET,
+ _default_dict,
+ _ChainMap,
+)
+
+__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError",
+ "NoOptionError", "InterpolationError", "InterpolationDepthError",
+ "InterpolationMissingOptionError", "InterpolationSyntaxError",
+ "ParsingError", "MissingSectionHeaderError",
+ "ConfigParser", "SafeConfigParser", "RawConfigParser",
+ "Interpolation", "BasicInterpolation", "ExtendedInterpolation",
+ "LegacyInterpolation", "SectionProxy", "ConverterMapping",
+ "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
+
+# NOTE: names missing from __all__ are imported anyway for backwards compatibility.
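
With this shim on sys.path, callers get the Python 3 configparser API on Python 2 as well (assumption: the backports.configparser package ships alongside, as in the aob_py3 tree):

    import configparser  # resolves to this shim, then to backports.configparser

    parser = configparser.ConfigParser()
    parser.read_string(u"[global]\nprocess_size = 0\n")
    print(parser.get("global", "process_size"))  # '0'
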
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/LICENSE.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/LICENSE.txt
deleted file mode 100644
index b0ade048..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/LICENSE.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2005-2018, Michele Simionato
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- Redistributions in bytecode form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/METADATA
deleted file mode 100644
index fd12277a..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/METADATA
+++ /dev/null
@@ -1,131 +0,0 @@
-Metadata-Version: 2.1
-Name: decorator
-Version: 4.4.2
-Summary: Decorators for Humans
-Home-page: https://github.com/micheles/decorator
-Author: Michele Simionato
-Author-email: michele.simionato@gmail.com
-License: new BSD License
-Keywords: decorators generic utility
-Platform: All
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Natural Language :: English
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.2
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Utilities
-Requires-Python: >=2.6, !=3.0.*, !=3.1.*
-
-Decorators for Humans
-=====================
-
-The goal of the decorator module is to make it easy to define
-signature-preserving function decorators and decorator factories.
-It also includes an implementation of multiple dispatch and other niceties
-(please check the docs). It is released under a two-clauses
-BSD license, i.e. basically you can do whatever you want with it but I am not
-responsible.
-
-Installation
--------------
-
-If you are lazy, just perform
-
- ``$ pip install decorator``
-
-which will install just the module on your system.
-
-If you prefer to install the full distribution from source, including
-the documentation, clone the `GitHub repo`_ or download the tarball_, unpack it and run
-
- ``$ pip install .``
-
-in the main directory, possibly as superuser.
-
-.. _tarball: https://pypi.org/project/decorator/#files
-.. _GitHub repo: https://github.com/micheles/decorator
-
-Testing
---------
-
-If you have the source code installation you can run the tests with
-
- `$ python src/tests/test.py -v`
-
-or (if you have setuptools installed)
-
- `$ python setup.py test`
-
-Notice that you may run into trouble if in your system there
-is an older version of the decorator module; in such a case remove the
-old version. It is safe even to copy the module `decorator.py` over
-an existing one, since we kept backward-compatibility for a long time.
-
-Repository
----------------
-
-The project is hosted on GitHub. You can look at the source here:
-
- https://github.com/micheles/decorator
-
-Documentation
----------------
-
-The documentation has been moved to https://github.com/micheles/decorator/blob/master/docs/documentation.md
-
-From there you can get a PDF version by simply using the print
-functionality of your browser.
-
-Here is the documentation for previous versions of the module:
-
-https://github.com/micheles/decorator/blob/4.3.2/docs/tests.documentation.rst
-https://github.com/micheles/decorator/blob/4.2.1/docs/tests.documentation.rst
-https://github.com/micheles/decorator/blob/4.1.2/docs/tests.documentation.rst
-https://github.com/micheles/decorator/blob/4.0.0/documentation.rst
-https://github.com/micheles/decorator/blob/3.4.2/documentation.rst
-
-For the impatient
------------------
-
-Here is an example of how to define a family of decorators tracing slow
-operations:
-
-.. code-block:: python
-
- from decorator import decorator
-
- @decorator
- def warn_slow(func, timelimit=60, *args, **kw):
- t0 = time.time()
- result = func(*args, **kw)
- dt = time.time() - t0
- if dt > timelimit:
- logging.warn('%s took %d seconds', func.__name__, dt)
- else:
- logging.info('%s took %d seconds', func.__name__, dt)
- return result
-
- @warn_slow # warn if it takes more than 1 minute
- def preprocess_input_files(inputdir, tempdir):
- ...
-
- @warn_slow(timelimit=600) # warn if it takes more than 10 minutes
- def run_calculation(tempdir, outdir):
- ...
-
-Enjoy!
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/RECORD
deleted file mode 100644
index 23ed232f..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/RECORD
+++ /dev/null
@@ -1,8 +0,0 @@
-decorator-4.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-decorator-4.4.2.dist-info/LICENSE.txt,sha256=_RFmDKvwUyCCxFcGhi-vwpSQfsf44heBgkCkmZgGeC4,1309
-decorator-4.4.2.dist-info/METADATA,sha256=RYLh5Qy8XzYOcgCT6RsI_cTXG_PE1QvoAVT-u2vus80,4168
-decorator-4.4.2.dist-info/RECORD,,
-decorator-4.4.2.dist-info/WHEEL,sha256=h_aVn5OB2IERUjMbi2pucmR_zzWJtk303YXvhh60NJ8,110
-decorator-4.4.2.dist-info/pbr.json,sha256=AL84oUUWQHwkd8OCPhLRo2NJjU5MDdmXMqRHv-posqs,47
-decorator-4.4.2.dist-info/top_level.txt,sha256=Kn6eQjo83ctWxXVyBMOYt0_YpjRjBznKYVuNyuC_DSI,10
-decorator.py,sha256=aQ8Ozc-EK26xBTOXVR5A-8Szgx99_bhaexZSGNn38Yc,17222
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/WHEEL
deleted file mode 100644
index 78e6f69d..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.33.4)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/pbr.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/pbr.json
deleted file mode 100644
index cd045997..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/pbr.json
+++ /dev/null
@@ -1 +0,0 @@
-{"is_release": false, "git_version": "8608a46"}
\ No newline at end of file
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/top_level.txt
deleted file mode 100644
index 3fe18a4d..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator-4.4.2.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-decorator
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator.py
old mode 100644
new mode 100755
index b1f8b567..78d227f3
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/decorator.py
@@ -40,9 +40,9 @@
import itertools
import collections
-__version__ = '4.4.2'
+__version__ = '4.4.1'
-if sys.version_info >= (3,):
+if sys.version >= '3':
from inspect import getfullargspec
def get_init(cls):
@@ -179,7 +179,8 @@ def make(self, src_templ, evaldict=None, addsource=False, **attrs):
# Ensure each generated function has a unique filename for profilers
# (such as cProfile) that depend on the tuple of (<filename>,
# <firstlineno>, <function name>) being unique.
- filename = '<decorator-gen-%d>' % next(self._compile_count)
+ filename = '<%s:decorator-gen-%d>' % (
+ __file__, next(self._compile_count))
try:
code = compile(src, filename, 'single')
exec(code, evaldict)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/LICENSE b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/LICENSE
deleted file mode 100644
index 311690c6..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/LICENSE
+++ /dev/null
@@ -1,49 +0,0 @@
-PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
---------------------------------------------
-
-1. This LICENSE AGREEMENT is between the Python Software Foundation
-("PSF"), and the Individual or Organization ("Licensee") accessing and
-otherwise using this software ("Python") in source or binary form and
-its associated documentation.
-
-2. Subject to the terms and conditions of this License Agreement, PSF
-hereby grants Licensee a nonexclusive, royalty-free, world-wide
-license to reproduce, analyze, test, perform and/or display publicly,
-prepare derivative works, distribute, and otherwise use Python
-alone or in any derivative version, provided, however, that PSF's
-License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
-2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Python Software Foundation;
-All Rights Reserved" are retained in Python alone or in any derivative
-version prepared by Licensee.
-
-3. In the event Licensee prepares a derivative work that is based on
-or incorporates Python or any part thereof, and wants to make
-the derivative work available to others as provided herein, then
-Licensee hereby agrees to include in any such work a brief summary of
-the changes made to Python.
-
-4. PSF is making Python available to Licensee on an "AS IS"
-basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
-IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
-DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
-FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
-INFRINGE ANY THIRD PARTY RIGHTS.
-
-5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
-FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
-A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
-OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
-
-6. This License Agreement will automatically terminate upon a material
-breach of its terms and conditions.
-
-7. Nothing in this License Agreement shall be deemed to create any
-relationship of agency, partnership, or joint venture between PSF and
-Licensee. This License Agreement does not grant permission to use PSF
-trademarks or trade name in a trademark sense to endorse or promote
-products or services of Licensee, or any third party.
-
-8. By copying, installing or otherwise using Python, Licensee
-agrees to be bound by the terms and conditions of this License
-Agreement.
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/METADATA
deleted file mode 100644
index f916e891..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/METADATA
+++ /dev/null
@@ -1,978 +0,0 @@
-Metadata-Version: 2.1
-Name: defusedxml
-Version: 0.7.1
-Summary: XML bomb protection for Python stdlib modules
-Home-page: https://github.com/tiran/defusedxml
-Author: Christian Heimes
-Author-email: christian@python.org
-Maintainer: Christian Heimes
-Maintainer-email: christian@python.org
-License: PSFL
-Download-URL: https://pypi.python.org/pypi/defusedxml
-Keywords: xml bomb DoS
-Platform: all
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Python Software Foundation License
-Classifier: Natural Language :: English
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Topic :: Text Processing :: Markup :: XML
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
-
-===================================================
-defusedxml -- defusing XML bombs and other exploits
-===================================================
-
-.. image:: https://img.shields.io/pypi/v/defusedxml.svg
- :target: https://pypi.org/project/defusedxml/
- :alt: Latest Version
-
-.. image:: https://img.shields.io/pypi/pyversions/defusedxml.svg
- :target: https://pypi.org/project/defusedxml/
- :alt: Supported Python versions
-
-.. image:: https://travis-ci.org/tiran/defusedxml.svg?branch=master
- :target: https://travis-ci.org/tiran/defusedxml
- :alt: Travis CI
-
-.. image:: https://codecov.io/github/tiran/defusedxml/coverage.svg?branch=master
- :target: https://codecov.io/github/tiran/defusedxml?branch=master
- :alt: codecov
-
-.. image:: https://img.shields.io/pypi/dm/defusedxml.svg
- :target: https://pypistats.org/packages/defusedxml
- :alt: PyPI downloads
-
-.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
- :target: https://github.com/psf/black
- :alt: Code style: black
-
-..
-
- "It's just XML, what could probably go wrong?"
-
-Christian Heimes
-
-Synopsis
-========
-
-The results of an attack on a vulnerable XML library can be fairly dramatic.
-With just a few hundred **Bytes** of XML data an attacker can occupy several
-**Gigabytes** of memory within **seconds**. An attacker can also keep
-CPUs busy for a long time with a small to medium size request. Under some
-circumstances it is even possible to access local files on your
-server, to circumvent a firewall, or to abuse services to rebound attacks to
-third parties.
-
-The attacks use and abuse less common features of XML and its parsers. The
-majority of developers are unacquainted with features such as processing
-instructions and entity expansions that XML inherited from SGML. At best
-they know about ``<!DOCTYPE>`` from experience with HTML but they are not
-aware that a document type definition (DTD) can generate an HTTP request
-or load a file from the file system.
-
-None of the issues is new. They have been known for a long time. Billion
-laughs was first reported in 2003. Nevertheless some XML libraries and
-applications are still vulnerable and even heavy users of XML are
-surprised by these features. It's hard to say whom to blame for the
-situation. It's too short sighted to shift all blame on XML parsers and
-XML libraries for using insecure default settings. After all they
-properly implement XML specifications. Application developers must not
-assume that a library is configured for security and rejects potentially
-harmful data by default.
-
-
-.. contents:: Table of Contents
- :depth: 2
-
-
-Attack vectors
-==============
-
-billion laughs / exponential entity expansion
----------------------------------------------
-
-The `Billion Laughs`_ attack -- also known as exponential entity expansion --
-uses multiple levels of nested entities. The original example uses 9 levels
-of 10 expansions in each level to expand the string ``lol`` to a string of
-3 * 10 :sup:`9` bytes, hence the name "billion laughs". The resulting string
-occupies 3 GB (2.79 GiB) of memory; intermediate strings require additional
-memory. Because most parsers don't cache the intermediate step for every
-expansion it is repeated over and over again. It increases the CPU load even
-more.
-
-An XML document of just a few hundred bytes can disrupt all services on a
-machine within seconds.
-
-Example XML::
-
-   <!DOCTYPE xmlbomb [
-    <!ENTITY a "1234567890" >
-    <!ENTITY b "&a;&a;&a;&a;&a;&a;&a;&a;">
-    <!ENTITY c "&b;&b;&b;&b;&b;&b;&b;&b;">
-    <!ENTITY d "&c;&c;&c;&c;&c;&c;&c;&c;">
-   ]>
-   <bomb>&d;</bomb>
-
-
-quadratic blowup entity expansion
----------------------------------
-
-A quadratic blowup attack is similar to a `Billion Laughs`_ attack; it abuses
-entity expansion, too. Instead of nested entities it repeats one large entity
-with a couple of thousand chars over and over again. The attack isn't as
-efficient as the exponential case but it avoids triggering countermeasures of
-parsers against heavily nested entities. Some parsers limit the depth and
-breadth of a single entity but not the total amount of expanded text
-throughout an entire XML document.
-
-A medium-sized XML document with a couple of hundred kilobytes can require a
-couple of hundred MB to several GB of memory. When the attack is combined
-with some level of nested expansion an attacker is able to achieve a higher
-ratio of success.
-
-::
-
-   <!DOCTYPE bomb [
-   <!ENTITY a "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx...">
-   ]>
-   <bomb>&a;&a;&a;... repeat</bomb>
-
-
-external entity expansion (remote)
-----------------------------------
-
-Entity declarations can contain more than just text for replacement. They can
-also point to external resources by public identifiers or system identifiers.
-System identifiers are standard URIs. When the URI is a URL (e.g. a
-``http://`` locator) some parsers download the resource from the remote
-location and embed it into the XML document verbatim.
-
-Simple example of a parsed external entity::
-
-   <!DOCTYPE external [
-   <!ENTITY ee SYSTEM "http://www.python.org/some.xml">
-   ]>
-   <root>&ee;</root>
-
-The case of parsed external entities works only for valid XML content. The
-XML standard also supports unparsed external entities with a
-``NData declaration``.
-
-External entity expansion opens the door to plenty of exploits. An attacker
-can abuse a vulnerable XML library and application to rebound and forward
-network requests with the IP address of the server. What kind of exploit is
-possible depends highly on the parser and the application. For
-example:
-
-* An attacker can circumvent firewalls and gain access to restricted
- resources as all the requests are made from an internal and trustworthy
- IP address, not from the outside.
-* An attacker can abuse a service to attack, spy on or DoS your servers but
- also third party services. The attack is disguised with the IP address of
- the server and the attacker is able to utilize the high bandwidth of a big
- machine.
-* An attacker can exhaust additional resources on the machine, e.g. with
- requests to a service that doesn't respond or responds with very large
- files.
-* An attacker may gain knowledge of when, how often and from which IP
-  address an XML document is accessed.
-* An attacker could send mail from inside your network if the URL handler
- supports ``smtp://`` URIs.
-
-
-external entity expansion (local file)
---------------------------------------
-
-External entities with references to local files are a sub-case of external
-entity expansion. It's listed as an extra attack because it deserves extra
-attention. Some XML libraries such as lxml disable network access by default
-but still allow entity expansion with local file access by default. Local
-files are either referenced with a ``file://`` URL or by a file path (either
-relative or absolute).
-
-An attacker may be able to access and download all files that can be read by
-the application process. This may include critical configuration files, too.
-
-::
-
-   <!DOCTYPE external [
-   <!ENTITY ee SYSTEM "file:///PATH/TO/simple.xml">
-   ]>
-   <root>&ee;</root>
-
-
-DTD retrieval
--------------
-
-This case is similar to external entity expansion, too. Some XML libraries
-like Python's xml.dom.pulldom retrieve document type definitions from remote
-or local locations. Several attack scenarios from the external entity case
-apply to this issue as well.
-
-::
-
-   <?xml version="1.0" encoding="utf-8"?>
-   <!DOCTYPE html PUBLIC
-     "-//W3C//DTD XHTML 1.0 Transitional//EN"
-     "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-   <html>
-       <head/>
-       <body>text</body>
-   </html>
-
-
-
-Python XML Libraries
-====================
-
-.. csv-table:: vulnerabilities and features
- :header: "kind", "sax", "etree", "minidom", "pulldom", "xmlrpc", "lxml", "genshi"
- :widths: 24, 7, 8, 8, 7, 8, 8, 8
- :stub-columns: 0
-
- "billion laughs", "**True**", "**True**", "**True**", "**True**", "**True**", "False (1)", "False (5)"
- "quadratic blowup", "**True**", "**True**", "**True**", "**True**", "**True**", "**True**", "False (5)"
- "external entity expansion (remote)", "**True**", "False (3)", "False (4)", "**True**", "false", "False (1)", "False (5)"
- "external entity expansion (local file)", "**True**", "False (3)", "False (4)", "**True**", "false", "**True**", "False (5)"
- "DTD retrieval", "**True**", "False", "False", "**True**", "false", "False (1)", "False"
- "gzip bomb", "False", "False", "False", "False", "**True**", "**partly** (2)", "False"
- "xpath support (7)", "False", "False", "False", "False", "False", "**True**", "False"
- "xsl(t) support (7)", "False", "False", "False", "False", "False", "**True**", "False"
- "xinclude support (7)", "False", "**True** (6)", "False", "False", "False", "**True** (6)", "**True**"
- "C library", "expat", "expat", "expat", "expat", "expat", "libxml2", "expat"
-
-1. Lxml is protected against billion laughs attacks and doesn't do network
- lookups by default.
-2. libxml2 and lxml are not directly vulnerable to gzip decompression bombs
- but they don't protect you against them either.
-3. xml.etree doesn't expand entities and raises a ParseError when an entity
- occurs.
-4. minidom doesn't expand entities and simply returns the unexpanded entity
- verbatim.
-5. genshi.input of genshi 0.6 doesn't support entity expansion and raises a
- ParserError when an entity occurs.
-6. Library has (limited) XInclude support but requires an additional step to
- process inclusion.
-7. These are features but they may introduce exploitable holes, see
- `Other things to consider`_
-
-
-Settings in standard library
-----------------------------
-
-
-xml.sax.handler Features
-........................
-
-feature_external_ges (http://xml.org/sax/features/external-general-entities)
- disables external entity expansion
-
-feature_external_pes (http://xml.org/sax/features/external-parameter-entities)
- the option is ignored and doesn't modify any functionality
-
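-As a short illustration (not part of the original README), switching the
-general-entities feature off on a stock stdlib SAX parser looks like this::
-
-    >>> from xml.sax import make_parser
-    >>> from xml.sax.handler import feature_external_ges
-    >>> parser = make_parser()
-    >>> parser.setFeature(feature_external_ges, False)  # no external entities
-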
-DOM xml.dom.xmlbuilder.Options
-..............................
-
-external_parameter_entities
- ignored
-
-external_general_entities
- ignored
-
-external_dtd_subset
- ignored
-
-entities
- unsure
-
-
-defusedxml
-==========
-
-The `defusedxml package`_ (`defusedxml on PyPI`_)
-contains several Python-only workarounds and fixes
-for denial of service and other vulnerabilities in Python's XML libraries.
-In order to benefit from the protection you just have to import and use the
-listed functions / classes from the right defusedxml module instead of the
-original module. Only `defusedxml.xmlrpc`_ is implemented as a monkey patch.
-
-Instead of::
-
- >>> from xml.etree.ElementTree import parse
- >>> et = parse(xmlfile)
-
-alter code to::
-
- >>> from defusedxml.ElementTree import parse
- >>> et = parse(xmlfile)
-
-Additionally the package has an **untested** function to monkey patch
-all stdlib modules with ``defusedxml.defuse_stdlib()``.
-
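-A minimal sketch of that (untested, per the note above) monkey patch::
-
-    >>> import defusedxml
-    >>> defusedxml.defuse_stdlib()
-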
-All functions and parser classes accept three additional keyword arguments.
-They return either the same objects as the original functions or compatible
-subclasses.
-
-forbid_dtd (default: False)
- disallow XML with a ``<!DOCTYPE>`` processing instruction and raise a
- *DTDForbidden* exception when a DTD processing instruction is found.
-
-forbid_entities (default: True)
- disallow XML with ``<!ENTITY>`` declarations inside the DTD and raise an
- *EntitiesForbidden* exception when an entity is declared.
-
-forbid_external (default: True)
- disallow any access to remote or local resources in external entities
- or DTD and raise an *ExternalReferenceForbidden* exception when a DTD
- or entity references an external resource.
-
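-As an added sketch (not from the original README; ``untrusted_xml`` is a
-placeholder for attacker-supplied input), the keyword arguments combine
-with the drop-in functions like this::
-
-    >>> from defusedxml import DefusedXmlException
-    >>> from defusedxml.ElementTree import fromstring
-    >>> try:
-    ...     fromstring(untrusted_xml, forbid_dtd=True)
-    ... except DefusedXmlException:
-    ...     pass  # reject the document instead of parsing it
-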
-
-defusedxml (package)
---------------------
-
-DefusedXmlException, DTDForbidden, EntitiesForbidden,
-ExternalReferenceForbidden, NotSupportedError
-
-defuse_stdlib() (*experimental*)
-
-
-defusedxml.cElementTree
------------------------
-
-**NOTE** ``defusedxml.cElementTree`` is deprecated and will be removed in a
-future release. Import from ``defusedxml.ElementTree`` instead.
-
-parse(), iterparse(), fromstring(), XMLParser
-
-
-defusedxml.ElementTree
------------------------
-
-parse(), iterparse(), fromstring(), XMLParser
-
-
-defusedxml.expatreader
-----------------------
-
-create_parser(), DefusedExpatParser
-
-
-defusedxml.sax
---------------
-
-parse(), parseString(), make_parser()
-
-
-defusedxml.expatbuilder
------------------------
-
-parse(), parseString(), DefusedExpatBuilder, DefusedExpatBuilderNS
-
-
-defusedxml.minidom
-------------------
-
-parse(), parseString()
-
-
-defusedxml.pulldom
-------------------
-
-parse(), parseString()
-
-
-defusedxml.xmlrpc
------------------
-
-The fix is implemented as a monkey patch for the stdlib's xmlrpc package (3.x)
-or xmlrpclib module (2.x). The function `monkey_patch()` enables the fixes,
-`unmonkey_patch()` removes the patch and puts the code in its former state.
-
-The monkey patch protects against XML related attacks as well as
-decompression bombs and excessively large requests or responses. The default
-setting is 30 MB for requests, responses and gzip decompression. You can
-modify the default by changing the module variable `MAX_DATA`. A value of
-`-1` disables the limit.
-
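-A minimal sketch of applying the patch and raising the limit (the value is
-arbitrary)::
-
-    >>> from defusedxml import xmlrpc
-    >>> xmlrpc.MAX_DATA = 60 * 1024 * 1024  # 60 MB instead of the 30 MB default
-    >>> xmlrpc.monkey_patch()
-    >>> # ... use the stdlib xmlrpc client as usual ...
-    >>> xmlrpc.unmonkey_patch()
-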
-
-defusedxml.lxml
----------------
-
-**DEPRECATED** The module is deprecated and will be removed in a future
-release.
-
-The module acts as an *example* of how you could protect code that uses
-lxml.etree. It implements a custom Element class that filters out
-Entity instances, a custom parser factory and a thread local storage for
-parser instances. It also has a check_docinfo() function which inspects
-a tree for internal or external DTDs and entity declarations. In order to
-check for entities lxml > 3.0 is required.
-
-parse(), fromstring()
-RestrictedElement, GlobalParserTLS, getDefaultParser(), check_docinfo()
-
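-For example, the (deprecated) helpers could be combined like this; the file
-name is a placeholder and the forbid arguments are as documented above::
-
-    >>> from defusedxml.lxml import parse, check_docinfo
-    >>> tree = parse("untrusted.xml")
-    >>> check_docinfo(tree, forbid_dtd=True)
-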
-
-defusedexpat
-============
-
-The `defusedexpat package`_ (`defusedexpat on PyPI`_)
-comes with binary extensions and a
-`modified expat`_ library instead of the standard `expat parser`_. It's
-basically a stand-alone version of the patches for Python's standard
-library C extensions.
-
-Modifications in expat
-----------------------
-
-new definitions::
-
- XML_BOMB_PROTECTION
- XML_DEFAULT_MAX_ENTITY_INDIRECTIONS
- XML_DEFAULT_MAX_ENTITY_EXPANSIONS
- XML_DEFAULT_RESET_DTD
-
-new XML_FeatureEnum members::
-
- XML_FEATURE_MAX_ENTITY_INDIRECTIONS
- XML_FEATURE_MAX_ENTITY_EXPANSIONS
- XML_FEATURE_IGNORE_DTD
-
-new XML_Error members::
-
- XML_ERROR_ENTITY_INDIRECTIONS
- XML_ERROR_ENTITY_EXPANSION
-
-new API functions::
-
- int XML_GetFeature(XML_Parser parser,
- enum XML_FeatureEnum feature,
- long *value);
- int XML_SetFeature(XML_Parser parser,
- enum XML_FeatureEnum feature,
- long value);
- int XML_GetFeatureDefault(enum XML_FeatureEnum feature,
- long *value);
- int XML_SetFeatureDefault(enum XML_FeatureEnum feature,
- long value);
-
-XML_FEATURE_MAX_ENTITY_INDIRECTIONS
- Limit the amount of indirections that are allowed to occur during the
- expansion of a nested entity. A counter starts when an entity reference
- is encountered. It resets after the entity is fully expanded. The limit
- protects the parser against exponential entity expansion attacks (aka
- billion laughs attack). When the limit is exceeded the parser stops and
- fails with `XML_ERROR_ENTITY_INDIRECTIONS`.
- A value of 0 disables the protection.
-
- Supported range
- 0 .. UINT_MAX
- Default
- 40
-
-XML_FEATURE_MAX_ENTITY_EXPANSIONS
- Limit the total length of all entity expansions throughout the entire
- document. The lengths of all entities are accumulated in a parser variable.
- The setting protects against quadratic blowup attacks (lots of expansions
- of a large entity declaration). When the sum of all entities exceeds
- the limit, the parser stops and fails with `XML_ERROR_ENTITY_EXPANSION`.
- A value of 0 disables the protection.
-
- Supported range
- 0 .. UINT_MAX
- Default
- 8 MiB
-
-XML_FEATURE_RESET_DTD
- Reset all DTD information after the <!DOCTYPE> block has been parsed. When
- the flag is set (default: false) all DTD information is reset after the
- endDoctypeDeclHandler has been called. The flag can be set inside the
- endDoctypeDeclHandler. Without DTD information any entity reference in
- the document body leads to `XML_ERROR_UNDEFINED_ENTITY`.
-
- Supported range
- 0, 1
- Default
- 0
-
-
-How to avoid XML vulnerabilities
-================================
-
-Best practices
---------------
-
-* Don't allow DTDs
-* Don't expand entities
-* Don't resolve externals
-* Limit parse depth
-* Limit total input size
-* Limit parse time
-* Favor a SAX or iterparse-like parser for potentially large data
-* Validate and properly quote arguments to XSL transformations and
- XPath queries
-* Don't use XPath expressions from untrusted sources
-* Don't apply XSL transformations that come from untrusted sources
-
-(based on Brad Hill's `Attacking XML Security`_)
-
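-Putting a few of these practices together (a sketch only, not from the
-original README; the file name is a placeholder), a streaming parse with
-defusedxml keeps memory bounded::
-
-    >>> from defusedxml.ElementTree import iterparse
-    >>> for event, elem in iterparse("large.xml"):
-    ...     elem.clear()  # discard processed elements to bound memory
-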
-
-Other things to consider
-========================
-
-XML, XML parsers and processing libraries have more features and possible
-issues that could lead to DoS vulnerabilities or security exploits in
-applications. I have compiled an incomplete list of theoretical issues that
-need further research and more attention. The list is deliberately pessimistic
-and a bit paranoid, too. It contains things that might go wrong under daffy
-circumstances.
-
-
-attribute blowup / hash collision attack
-----------------------------------------
-
-XML parsers may use an algorithm with quadratic runtime O(n :sup:`2`) to
-handle attributes and namespaces. If a parser uses hash tables (dictionaries)
-to store attributes and namespaces the implementation may be vulnerable to
-hash collision attacks, thus reducing the performance to O(n :sup:`2`) again.
-In either case an attacker is able to forge a denial of service attack with
-an XML document that contains thousands upon thousands of attributes in
-a single node.
-
-I haven't yet researched whether expat, pyexpat or libxml2 are vulnerable.
-
-
-decompression bomb
-------------------
-
-The issue of decompression bombs (aka `ZIP bomb`_) applies to all XML
-libraries that can parse compressed XML streams like gzipped HTTP streams
-or LZMA-ed files. For an attacker it can reduce the amount of transmitted
-data by three orders of magnitude or more. Gzip is able to compress 1 GiB
-of zeros to roughly 1 MB, lzma is even better::
-
- $ dd if=/dev/zero bs=1M count=1024 | gzip > zeros.gz
- $ dd if=/dev/zero bs=1M count=1024 | lzma -z > zeros.xz
- $ ls -sh zeros.*
- 1020K zeros.gz
- 148K zeros.xz
-
-None of Python's standard XML libraries decompress streams except for
-``xmlrpclib``. The module is vulnerable
-to decompression bombs.
-
-lxml can load and process compressed data through libxml2 transparently.
-libxml2 can handle even very large blobs of compressed data efficiently
-without using too much memory. But it doesn't protect applications from
-decompression bombs. A carefully written SAX or iterparse-like approach can
-be safe.
-
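-A defensive sketch of my own (the cap mirrors the 30 MB xmlrpc default
-above and is otherwise arbitrary): decompress at most a fixed number of
-bytes before handing the payload to a parser::
-
-    import gzip
-
-    MAX_DATA = 30 * 1024 * 1024  # 30 MB
-
-    def decompress_limited(fileobj, limit=MAX_DATA):
-        # read at most limit + 1 bytes; anything beyond means the
-        # decompressed payload is too large to trust
-        data = gzip.GzipFile(fileobj=fileobj).read(limit + 1)
-        if len(data) > limit:
-            raise ValueError("decompressed payload exceeds limit")
-        return data
-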
-
-Processing Instruction
-----------------------
-
-`PI`_'s like::
-
-   <?xml-stylesheet type="text/xsl" href="style.xsl"?>
-
-may impose more threats for XML processing. It depends on whether and how a
-processor handles processing instructions. The issue of URL retrieval with
-network or local file access applies to processing instructions, too.
-
-
-Other DTD features
-------------------
-
-`DTD`_ has more features like ``<!NOTATION>``. I haven't researched how
-these features may be a security threat.
-
-
-XPath
------
-
-XPath statements may introduce DoS vulnerabilities. Code should never execute
-queries from untrusted sources. An attacker may also be able to create an XML
-document that makes certain XPath queries costly or resource hungry.
-
-
-XPath injection attacks
------------------------
-
-XPath injection attacks pretty much work like SQL injection attacks.
-Arguments to XPath queries must be quoted and validated properly, especially
-when they are taken from the user. The page `Avoid the dangers of XPath injection`_
-lists some ramifications of XPath injections.
-
-Python's standard library doesn't have XPath support. Lxml supports
-parameterized XPath queries which do proper quoting. You just have to use
-its xpath() method correctly::
-
- # DON'T
- >>> tree.xpath("/tag[@id='%s']" % value)
-
- # instead do
- >>> tree.xpath("/tag[@id=$tagid]", tagid=name)
-
-
-XInclude
---------
-
-`XML Inclusion`_ is another way to load and include external files::
-
-   <root xmlns:xi="http://www.w3.org/2001/XInclude">
-     <xi:include href="filename.txt" parse="text" />
-   </root>
-
-
-This feature should be disabled when XML files from an untrusted source are
-processed. Some Python XML libraries and libxml2 support XInclude but don't
-have an option to sandbox inclusion and limit it to allowed directories.
-
-
-XMLSchema location
-------------------
-
-A validating XML parser may download schema files from the information in an
-``xsi:schemaLocation`` attribute.
-
-::
-
-   <ead xmlns="urn:isbn:1-931666-22-9"
-        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-        xsi:schemaLocation="urn:isbn:1-931666-22-9 http://www.loc.gov/ead/ead.xsd">
-   </ead>
-
-
-XSL Transformation
-------------------
-
-You should keep in mind that XSLT is a Turing complete language. Never
-process XSLT code from unknown or untrusted sources! XSLT processors may
-allow you to interact with external resources in ways you can't even imagine.
-Some processors even support extensions that allow read/write access to the
-file system, access to JRE objects or scripting with Jython.
-
-Example from `Attacking XML Security`_ for Xalan-J::
-
-   <xsl:stylesheet version="1.0"
-    xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
-    xmlns:rt="http://xml.apache.org/xalan/java/java.lang.Runtime"
-    xmlns:ob="http://xml.apache.org/xalan/java/java.lang.Object"
-    exclude-result-prefixes="rt ob">
-    <xsl:template match="/">
-      <xsl:variable name="runtimeObject" select="rt:getRuntime()"/>
-      <xsl:variable name="command"
-       select="rt:exec($runtimeObject, &apos;c:\Windows\system32\cmd.exe&apos;)"/>
-      <xsl:variable name="commandAsString" select="ob:toString($command)"/>
-      <xsl:value-of select="$commandAsString"/>
-    </xsl:template>
-   </xsl:stylesheet>
-
-Related CVEs
-============
-
-CVE-2013-1664
- Unrestricted entity expansion induces DoS vulnerabilities in Python XML
- libraries (XML bomb)
-
-CVE-2013-1665
- External entity expansion in Python XML libraries inflicts potential
- security flaws and DoS vulnerabilities
-
-
-Other languages / frameworks
-=============================
-
-Several other programming languages and frameworks are vulnerable as well. A
-couple of them are affected by the fact that libxml2 up to 2.9.0 has no
-protection against quadratic blowup attacks. Most of them have potentially
-dangerous default settings for entity expansion and external entities, too.
-
-Perl
-----
-
-Perl's XML::Simple is vulnerable to quadratic entity expansion and external
-entity expansion (both local and remote).
-
-
-Ruby
-----
-
-Ruby's REXML document parser is vulnerable to entity expansion attacks
-(both quadratic and exponential) but it doesn't do external entity
-expansion by default. In order to counteract entity expansion you have to
-disable the feature::
-
- REXML::Document.entity_expansion_limit = 0
-
-libxml-ruby and hpricot don't expand entities in their default configuration.
-
-
-PHP
----
-
-PHP's SimpleXML API is vulnerable to quadratic entity expansion and loads
-entities from local and remote resources. The option ``LIBXML_NONET`` disables
-network access but still allows local file access. ``LIBXML_NOENT`` seems to
-have no effect on entity expansion in PHP 5.4.6.
-
-
-C# / .NET / Mono
-----------------
-
-Information in `XML DoS and Defenses (MSDN)`_ suggests that .NET is
-vulnerable with its default settings. The article contains code snippets
-showing how to create a secure XML reader::
-
- XmlReaderSettings settings = new XmlReaderSettings();
- settings.ProhibitDtd = false;
- settings.MaxCharactersFromEntities = 1024;
- settings.XmlResolver = null;
- XmlReader reader = XmlReader.Create(stream, settings);
-
-
-Java
-----
-
-Untested. The documentation of Xerces and its `Xerces SecurityManager`_
-sounds like Xerces is also vulnerable to billion laughs attacks with its
-default settings. It also does entity resolving when an
-``org.xml.sax.EntityResolver`` is configured. I'm not yet sure about the
-default setting here.
-
-Java specialists suggest using a custom builder factory::
-
- DocumentBuilderFactory builderFactory = DocumentBuilderFactory.newInstance();
- builderFactory.setXIncludeAware(false);
- builderFactory.setExpandEntityReferences(false);
- builderFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
- // either
- builderFactory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
- // or if you need DTDs
- builderFactory.setFeature("http://xml.org/sax/features/external-general-entities", false);
- builderFactory.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
- builderFactory.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
- builderFactory.setFeature("http://apache.org/xml/features/nonvalidating/load-dtd-grammar", false);
-
-
-TODO
-====
-
-* DOM: Use xml.dom.xmlbuilder options for entity handling
-* SAX: take feature_external_ges and feature_external_pes (?) into account
-* test experimental monkey patching of stdlib modules
-* improve documentation
-
-
-License
-=======
-
-Copyright (c) 2013-2017 by Christian Heimes
-
-Licensed to PSF under a Contributor Agreement.
-
-See https://www.python.org/psf/license for licensing details.
-
-
-Acknowledgements
-================
-
-Brett Cannon (Python Core developer)
- review and code cleanup
-
-Antoine Pitrou (Python Core developer)
- code review
-
-Aaron Patterson, Ben Murphy and Michael Koziarski (Ruby community)
- Many thanks to Aaron, Ben and Michael from the Ruby community for their
- report and assistance.
-
-Thierry Carrez (OpenStack)
- Many thanks to Thierry for his report to the Python Security Response
- Team on behalf of the OpenStack security team.
-
-Carl Meyer (Django)
- Many thanks to Carl for his report to PSRT on behalf of the Django security
- team.
-
-Daniel Veillard (libxml2)
- Many thanks to Daniel for his insight and assistance with libxml2.
-
-semantics GmbH (https://www.semantics.de/)
- Many thanks to my employer semantics for letting me work on the issue
- during working hours as part of semantics's open source initiative.
-
-
-References
-==========
-
-* `XML DoS and Defenses (MSDN)`_
-* `Billion Laughs`_ on Wikipedia
-* `ZIP bomb`_ on Wikipedia
-* `Configure SAX parsers for secure processing`_
-* `Testing for XML Injection`_
-
-.. _defusedxml package: https://github.com/tiran/defusedxml
-.. _defusedxml on PyPI: https://pypi.python.org/pypi/defusedxml
-.. _defusedexpat package: https://github.com/tiran/defusedexpat
-.. _defusedexpat on PyPI: https://pypi.python.org/pypi/defusedexpat
-.. _modified expat: https://github.com/tiran/expat
-.. _expat parser: http://expat.sourceforge.net/
-.. _Attacking XML Security: https://www.isecpartners.com/media/12976/iSEC-HILL-Attacking-XML-Security-bh07.pdf
-.. _Billion Laughs: https://en.wikipedia.org/wiki/Billion_laughs
-.. _XML DoS and Defenses (MSDN): https://msdn.microsoft.com/en-us/magazine/ee335713.aspx
-.. _ZIP bomb: https://en.wikipedia.org/wiki/Zip_bomb
-.. _DTD: https://en.wikipedia.org/wiki/Document_Type_Definition
-.. _PI: https://en.wikipedia.org/wiki/Processing_Instruction
-.. _Avoid the dangers of XPath injection: http://www.ibm.com/developerworks/xml/library/x-xpathinjection/index.html
-.. _Configure SAX parsers for secure processing: http://www.ibm.com/developerworks/xml/library/x-tipcfsx/index.html
-.. _Testing for XML Injection: https://www.owasp.org/index.php/Testing_for_XML_Injection_(OWASP-DV-008)
-.. _Xerces SecurityManager: https://xerces.apache.org/xerces2-j/javadocs/xerces2/org/apache/xerces/util/SecurityManager.html
-.. _XML Inclusion: https://www.w3.org/TR/xinclude/#include_element
-
-Changelog
-=========
-
-defusedxml 0.7.1
----------------------
-
-*Release date: 08-Mar-2021*
-
-- Fix regression ``defusedxml.ElementTree.ParseError`` (#63)
- The ``ParseError`` exception is now the same class object as
- ``xml.etree.ElementTree.ParseError`` again.
-
-
-defusedxml 0.7.0
-----------------
-
-*Release date: 4-Mar-2021*
-
-- No changes
-
-
-defusedxml 0.7.0rc2
--------------------
-
-*Release date: 12-Jan-2021*
-
-- Re-add and deprecate ``defusedxml.cElementTree``
-- Use GitHub Actions instead of TravisCI
-- Restore ``ElementTree`` attribute of ``xml.etree`` module after patching
-
-defusedxml 0.7.0rc1
--------------------
-
-*Release date: 04-May-2020*
-
-- Add support for Python 3.9
-- ``defusedxml.cElementTree`` is not available with Python 3.9.
-- Python 2 is deprecated. Support for Python 2 will be removed in 0.8.0.
-
-
-defusedxml 0.6.0
-----------------
-
-*Release date: 17-Apr-2019*
-
-- Increase test coverage.
-- Add badges to README.
-
-
-defusedxml 0.6.0rc1
--------------------
-
-*Release date: 14-Apr-2019*
-
-- Test on Python 3.7 stable and 3.8-dev
-- Drop support for Python 3.4
-- No longer pass the *html* argument to XMLParse. It has been deprecated and
- ignored for a long time. The DefusedXMLParser still takes an html argument.
- A deprecation warning is issued when the argument is False and a TypeError
- when it's True.
-- defusedxml now fails early when pyexpat stdlib module is not available or
- broken.
-- defusedxml.ElementTree.__all__ now lists ParseError as public attribute.
-- The defusedxml.ElementTree and defusedxml.cElementTree modules had a typo
- and used XMLParse instead of XMLParser as an alias for DefusedXMLParser.
- Both the old and fixed name are now available.
-
-
-defusedxml 0.5.0
-----------------
-
-*Release date: 07-Feb-2017*
-
-- No changes
-
-
-defusedxml 0.5.0.rc1
---------------------
-
-*Release date: 28-Jan-2017*
-
-- Add compatibility with Python 3.6
-- Drop support for Python 2.6, 3.1, 3.2, 3.3
-- Fix lxml tests (XMLSyntaxError: Detected an entity reference loop)
-
-
-defusedxml 0.4.1
-----------------
-
-*Release date: 28-Mar-2013*
-
-- Add more demo exploits, e.g. python_external.py and Xalan XSLT demos.
-- Improved documentation.
-
-
-defusedxml 0.4
---------------
-
-*Release date: 25-Feb-2013*
-
-- As per http://seclists.org/oss-sec/2013/q1/340 please REJECT
- CVE-2013-0278, CVE-2013-0279 and CVE-2013-0280 and use CVE-2013-1664,
- CVE-2013-1665 for OpenStack/etc.
-- Add missing parser_list argument to sax.make_parser(). The argument is
- ignored, though. (thanks to Florian Apolloner)
-- Add demo exploit for external entity attack on Python's SAX parser, XML-RPC
- and WebDAV.
-
-
-defusedxml 0.3
---------------
-
-*Release date: 19-Feb-2013*
-
-- Improve documentation
-
-
-defusedxml 0.2
---------------
-
-*Release date: 15-Feb-2013*
-
-- Rename ExternalEntitiesForbidden to ExternalReferenceForbidden
-- Rename defusedxml.lxml.check_dtd() to check_docinfo()
-- Unify argument names in callbacks
-- Add arguments and formatted representation to exceptions
-- Add forbid_external argument to all functions and classes
-- More tests
-- LOTS of documentation
-- Add example code for other languages (Ruby, Perl, PHP) and parsers (Genshi)
-- Add protection against XML and gzip attacks to xmlrpclib
-
-defusedxml 0.1
---------------
-
-*Release date: 08-Feb-2013*
-
-- Initial and internal release for PSRT review
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/RECORD
deleted file mode 100644
index 0a2fa9c6..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/RECORD
+++ /dev/null
@@ -1,17 +0,0 @@
-defusedxml-0.7.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-defusedxml-0.7.1.dist-info/LICENSE,sha256=uAzp2oxCofkQeWJ_u-K_JyEK4Qig_-Xwd9WwjgdsJMg,2409
-defusedxml-0.7.1.dist-info/METADATA,sha256=Np0872SHDa-En7pxHLjQWn7-PI2asPdjrcNAef43i7E,32518
-defusedxml-0.7.1.dist-info/RECORD,,
-defusedxml-0.7.1.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-defusedxml-0.7.1.dist-info/top_level.txt,sha256=QGHa90F50pVKhWSFlERI0jtSKtqDiGyfeZX7dQNZAAw,11
-defusedxml/ElementTree.py,sha256=GLSqpCz58oXGPGyzf_HylsPS9_dcGVP5SN4dK7yvyPw,4640
-defusedxml/__init__.py,sha256=RczeaVJG64p2Fgy1jlCzbuRdchEPnEaCBrxgk8JJ_pM,1444
-defusedxml/cElementTree.py,sha256=PpaKMh3rU29sY8amAK4fzHQKl8gcAYD0h1LCoW62Rtk,1449
-defusedxml/common.py,sha256=3d26jNW4fNXzgjWhvUfs83Afiz5EVxFDupQbugkSMZc,4036
-defusedxml/expatbuilder.py,sha256=b4Q05vsBMJ5StkiTFf4my2rGGo1gZyEl_hC5MeFTOAA,3732
-defusedxml/expatreader.py,sha256=KOpSrwkSvj5SGOY9pTXOM26Dnz00rsJt33WueVvzpvc,2196
-defusedxml/lxml.py,sha256=HW-LFKdrfMRzHdi0Vcucq4-n8yz7v_OQwEQWFg1JQYA,4940
-defusedxml/minidom.py,sha256=3QcgygVwJqcWDQ3IZ2iol8zsH4cx3BRX70SPcd0bG2g,1884
-defusedxml/pulldom.py,sha256=DYj2D2lc7xoxZ38gfzujXmdznd8ovzDqGFXqyXbtxjk,1170
-defusedxml/sax.py,sha256=-SF08Msc2mWEYAMw62pJ5FMwWccOctFSnQwDLYLLlVE,1477
-defusedxml/xmlrpc.py,sha256=7rZQey3tqXcc1hrrM3RprOICU6fiFny9B9l4nmTioxA,5364
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/WHEEL
deleted file mode 100644
index ef99c6cf..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.34.2)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/top_level.txt
deleted file mode 100644
index 36969f2c..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml-0.7.1.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-defusedxml
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/ElementTree.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/ElementTree.py
deleted file mode 100644
index 5ba765f1..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/ElementTree.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# defusedxml
-#
-# Copyright (c) 2013 by Christian Heimes
-# Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
-"""Defused xml.etree.ElementTree facade
-"""
-from __future__ import print_function, absolute_import
-
-import sys
-import warnings
-from xml.etree.ElementTree import ParseError
-from xml.etree.ElementTree import TreeBuilder as _TreeBuilder
-from xml.etree.ElementTree import parse as _parse
-from xml.etree.ElementTree import tostring
-
-from .common import PY3
-
-if PY3:
- import importlib
-else:
- from xml.etree.ElementTree import XMLParser as _XMLParser
- from xml.etree.ElementTree import iterparse as _iterparse
-
-
-from .common import (
- DTDForbidden,
- EntitiesForbidden,
- ExternalReferenceForbidden,
- _generate_etree_functions,
-)
-
-__origin__ = "xml.etree.ElementTree"
-
-
-def _get_py3_cls():
- """Python 3.3 hides the pure Python code but defusedxml requires it.
-
- The code is based on test.support.import_fresh_module().
- """
- pymodname = "xml.etree.ElementTree"
- cmodname = "_elementtree"
-
- pymod = sys.modules.pop(pymodname, None)
- cmod = sys.modules.pop(cmodname, None)
-
- sys.modules[cmodname] = None
- try:
- pure_pymod = importlib.import_module(pymodname)
- finally:
- # restore module
- sys.modules[pymodname] = pymod
- if cmod is not None:
- sys.modules[cmodname] = cmod
- else:
- sys.modules.pop(cmodname, None)
- # restore attribute on original package
- etree_pkg = sys.modules["xml.etree"]
- if pymod is not None:
- etree_pkg.ElementTree = pymod
- elif hasattr(etree_pkg, "ElementTree"):
- del etree_pkg.ElementTree
-
- _XMLParser = pure_pymod.XMLParser
- _iterparse = pure_pymod.iterparse
- # patch pure module to use ParseError from C extension
- pure_pymod.ParseError = ParseError
-
- return _XMLParser, _iterparse
-
-
-if PY3:
- _XMLParser, _iterparse = _get_py3_cls()
-
-
-_sentinel = object()
-
-
-class DefusedXMLParser(_XMLParser):
- def __init__(
- self,
- html=_sentinel,
- target=None,
- encoding=None,
- forbid_dtd=False,
- forbid_entities=True,
- forbid_external=True,
- ):
- # Python 2.x old style class
- _XMLParser.__init__(self, target=target, encoding=encoding)
- if html is not _sentinel:
- # the 'html' argument has been deprecated and ignored in all
- # supported versions of Python. Python 3.8 finally removed it.
- if html:
- raise TypeError("'html=True' is no longer supported.")
- else:
- warnings.warn(
- "'html' keyword argument is no longer supported. Pass "
- "in arguments as keyword arguments.",
- category=DeprecationWarning,
- )
-
- self.forbid_dtd = forbid_dtd
- self.forbid_entities = forbid_entities
- self.forbid_external = forbid_external
- if PY3:
- parser = self.parser
- else:
- parser = self._parser
- if self.forbid_dtd:
- parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl
- if self.forbid_entities:
- parser.EntityDeclHandler = self.defused_entity_decl
- parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl
- if self.forbid_external:
- parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler
-
- def defused_start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
- raise DTDForbidden(name, sysid, pubid)
-
- def defused_entity_decl(
- self, name, is_parameter_entity, value, base, sysid, pubid, notation_name
- ):
- raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
-
- def defused_unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
- # expat 1.2
- raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) # pragma: no cover
-
- def defused_external_entity_ref_handler(self, context, base, sysid, pubid):
- raise ExternalReferenceForbidden(context, base, sysid, pubid)
-
-
-# aliases
-# XMLParse is a typo, keep it for backwards compatibility
-XMLTreeBuilder = XMLParse = XMLParser = DefusedXMLParser
-
-parse, iterparse, fromstring = _generate_etree_functions(
- DefusedXMLParser, _TreeBuilder, _parse, _iterparse
-)
-XML = fromstring
-
-
-__all__ = [
- "ParseError",
- "XML",
- "XMLParse",
- "XMLParser",
- "XMLTreeBuilder",
- "fromstring",
- "iterparse",
- "parse",
- "tostring",
-]
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/__init__.py
deleted file mode 100644
index 4b5a2300..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/__init__.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# defusedxml
-#
-# Copyright (c) 2013 by Christian Heimes
-# Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
-"""Defuse XML bomb denial of service vulnerabilities
-"""
-from __future__ import print_function, absolute_import
-
-import warnings
-
-from .common import (
- DefusedXmlException,
- DTDForbidden,
- EntitiesForbidden,
- ExternalReferenceForbidden,
- NotSupportedError,
- _apply_defusing,
-)
-
-
-def defuse_stdlib():
- """Monkey patch and defuse all stdlib packages
-
- :warning: The monkey patch is an EXPERIMENTAL feature.
- """
- defused = {}
-
- with warnings.catch_warnings():
- from . import cElementTree
- from . import ElementTree
- from . import minidom
- from . import pulldom
- from . import sax
- from . import expatbuilder
- from . import expatreader
- from . import xmlrpc
-
- xmlrpc.monkey_patch()
- defused[xmlrpc] = None
-
- defused_mods = [
- cElementTree,
- ElementTree,
- minidom,
- pulldom,
- sax,
- expatbuilder,
- expatreader,
- ]
-
- for defused_mod in defused_mods:
- stdlib_mod = _apply_defusing(defused_mod)
- defused[defused_mod] = stdlib_mod
-
- return defused
-
-
-__version__ = "0.7.1"
-
-__all__ = [
- "DefusedXmlException",
- "DTDForbidden",
- "EntitiesForbidden",
- "ExternalReferenceForbidden",
- "NotSupportedError",
-]
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/cElementTree.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/cElementTree.py
deleted file mode 100644
index 84670c68..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/cElementTree.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# defusedxml
-#
-# Copyright (c) 2013 by Christian Heimes
-# Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
-"""Defused xml.etree.cElementTree
-"""
-from __future__ import absolute_import
-
-import warnings
-
-from .common import _generate_etree_functions
-
-from xml.etree.cElementTree import TreeBuilder as _TreeBuilder
-from xml.etree.cElementTree import parse as _parse
-from xml.etree.cElementTree import tostring
-
-# iterparse from ElementTree!
-from xml.etree.ElementTree import iterparse as _iterparse
-
-# This module is an alias for ElementTree just like xml.etree.cElementTree
-from .ElementTree import (
- XML,
- XMLParse,
- XMLParser,
- XMLTreeBuilder,
- fromstring,
- iterparse,
- parse,
- tostring,
- DefusedXMLParser,
- ParseError,
-)
-
-__origin__ = "xml.etree.cElementTree"
-
-
-warnings.warn(
- "defusedxml.cElementTree is deprecated, import from defusedxml.ElementTree instead.",
- category=DeprecationWarning,
- stacklevel=2,
-)
-
-# XMLParse is a typo, keep it for backwards compatibility
-XMLTreeBuilder = XMLParse = XMLParser = DefusedXMLParser
-
-parse, iterparse, fromstring = _generate_etree_functions(
- DefusedXMLParser, _TreeBuilder, _parse, _iterparse
-)
-XML = fromstring
-
-__all__ = [
- "ParseError",
- "XML",
- "XMLParse",
- "XMLParser",
- "XMLTreeBuilder",
- "fromstring",
- "iterparse",
- "parse",
- "tostring",
-]
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/common.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/common.py
deleted file mode 100644
index 5ceda1fb..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/common.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# defusedxml
-#
-# Copyright (c) 2013 by Christian Heimes
-# Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
-"""Common constants, exceptions and helpe functions
-"""
-import sys
-import xml.parsers.expat
-
-PY3 = sys.version_info[0] == 3
-
-# Fail early when pyexpat is not installed correctly
-if not hasattr(xml.parsers.expat, "ParserCreate"):
- raise ImportError("pyexpat") # pragma: no cover
-
-
-class DefusedXmlException(ValueError):
- """Base exception"""
-
- def __repr__(self):
- return str(self)
-
-
-class DTDForbidden(DefusedXmlException):
- """Document type definition is forbidden"""
-
- def __init__(self, name, sysid, pubid):
- super(DTDForbidden, self).__init__()
- self.name = name
- self.sysid = sysid
- self.pubid = pubid
-
- def __str__(self):
- tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})"
- return tpl.format(self.name, self.sysid, self.pubid)
-
-
-class EntitiesForbidden(DefusedXmlException):
- """Entity definition is forbidden"""
-
- def __init__(self, name, value, base, sysid, pubid, notation_name):
- super(EntitiesForbidden, self).__init__()
- self.name = name
- self.value = value
- self.base = base
- self.sysid = sysid
- self.pubid = pubid
- self.notation_name = notation_name
-
- def __str__(self):
- tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})"
- return tpl.format(self.name, self.sysid, self.pubid)
-
-
-class ExternalReferenceForbidden(DefusedXmlException):
- """Resolving an external reference is forbidden"""
-
- def __init__(self, context, base, sysid, pubid):
- super(ExternalReferenceForbidden, self).__init__()
- self.context = context
- self.base = base
- self.sysid = sysid
- self.pubid = pubid
-
- def __str__(self):
- tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})"
- return tpl.format(self.sysid, self.pubid)
-
-
-class NotSupportedError(DefusedXmlException):
- """The operation is not supported"""
-
-
-def _apply_defusing(defused_mod):
- assert defused_mod is sys.modules[defused_mod.__name__]
- stdlib_name = defused_mod.__origin__
- __import__(stdlib_name, {}, {}, ["*"])
- stdlib_mod = sys.modules[stdlib_name]
- stdlib_names = set(dir(stdlib_mod))
- for name, obj in vars(defused_mod).items():
- if name.startswith("_") or name not in stdlib_names:
- continue
- setattr(stdlib_mod, name, obj)
- return stdlib_mod
-
-
-def _generate_etree_functions(DefusedXMLParser, _TreeBuilder, _parse, _iterparse):
- """Factory for functions needed by etree, dependent on whether
- cElementTree or ElementTree is used."""
-
- def parse(source, parser=None, forbid_dtd=False, forbid_entities=True, forbid_external=True):
- if parser is None:
- parser = DefusedXMLParser(
- target=_TreeBuilder(),
- forbid_dtd=forbid_dtd,
- forbid_entities=forbid_entities,
- forbid_external=forbid_external,
- )
- return _parse(source, parser)
-
- def iterparse(
- source,
- events=None,
- parser=None,
- forbid_dtd=False,
- forbid_entities=True,
- forbid_external=True,
- ):
- if parser is None:
- parser = DefusedXMLParser(
- target=_TreeBuilder(),
- forbid_dtd=forbid_dtd,
- forbid_entities=forbid_entities,
- forbid_external=forbid_external,
- )
- return _iterparse(source, events, parser)
-
- def fromstring(text, forbid_dtd=False, forbid_entities=True, forbid_external=True):
- parser = DefusedXMLParser(
- target=_TreeBuilder(),
- forbid_dtd=forbid_dtd,
- forbid_entities=forbid_entities,
- forbid_external=forbid_external,
- )
- parser.feed(text)
- return parser.close()
-
- return parse, iterparse, fromstring
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/expatbuilder.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/expatbuilder.py
deleted file mode 100644
index 7bfc57e4..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/expatbuilder.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# defusedxml
-#
-# Copyright (c) 2013 by Christian Heimes
-# Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
-"""Defused xml.dom.expatbuilder
-"""
-from __future__ import print_function, absolute_import
-
-from xml.dom.expatbuilder import ExpatBuilder as _ExpatBuilder
-from xml.dom.expatbuilder import Namespaces as _Namespaces
-
-from .common import DTDForbidden, EntitiesForbidden, ExternalReferenceForbidden
-
-__origin__ = "xml.dom.expatbuilder"
-
-
-class DefusedExpatBuilder(_ExpatBuilder):
- """Defused document builder"""
-
- def __init__(
- self, options=None, forbid_dtd=False, forbid_entities=True, forbid_external=True
- ):
- _ExpatBuilder.__init__(self, options)
- self.forbid_dtd = forbid_dtd
- self.forbid_entities = forbid_entities
- self.forbid_external = forbid_external
-
- def defused_start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
- raise DTDForbidden(name, sysid, pubid)
-
- def defused_entity_decl(
- self, name, is_parameter_entity, value, base, sysid, pubid, notation_name
- ):
- raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
-
- def defused_unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
- # expat 1.2
- raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) # pragma: no cover
-
- def defused_external_entity_ref_handler(self, context, base, sysid, pubid):
- raise ExternalReferenceForbidden(context, base, sysid, pubid)
-
- def install(self, parser):
- _ExpatBuilder.install(self, parser)
-
- if self.forbid_dtd:
- parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl
- if self.forbid_entities:
- # if self._options.entities:
- parser.EntityDeclHandler = self.defused_entity_decl
- parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl
- if self.forbid_external:
- parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler
-
-
-class DefusedExpatBuilderNS(_Namespaces, DefusedExpatBuilder):
- """Defused document builder that supports namespaces."""
-
- def install(self, parser):
- DefusedExpatBuilder.install(self, parser)
- if self._options.namespace_declarations:
- parser.StartNamespaceDeclHandler = self.start_namespace_decl_handler
-
- def reset(self):
- DefusedExpatBuilder.reset(self)
- self._initNamespaces()
-
-
-def parse(file, namespaces=True, forbid_dtd=False, forbid_entities=True, forbid_external=True):
- """Parse a document, returning the resulting Document node.
-
- 'file' may be either a file name or an open file object.
- """
- if namespaces:
- build_builder = DefusedExpatBuilderNS
- else:
- build_builder = DefusedExpatBuilder
- builder = build_builder(
- forbid_dtd=forbid_dtd, forbid_entities=forbid_entities, forbid_external=forbid_external
- )
-
- if isinstance(file, str):
- fp = open(file, "rb")
- try:
- result = builder.parseFile(fp)
- finally:
- fp.close()
- else:
- result = builder.parseFile(file)
- return result
-
-
-def parseString(
- string, namespaces=True, forbid_dtd=False, forbid_entities=True, forbid_external=True
-):
- """Parse a document from a string, returning the resulting
- Document node.
- """
- if namespaces:
- build_builder = DefusedExpatBuilderNS
- else:
- build_builder = DefusedExpatBuilder
- builder = build_builder(
- forbid_dtd=forbid_dtd, forbid_entities=forbid_entities, forbid_external=forbid_external
- )
- return builder.parseString(string)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/expatreader.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/expatreader.py
deleted file mode 100644
index 890e1d16..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/expatreader.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# defusedxml
-#
-# Copyright (c) 2013 by Christian Heimes
-# Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
-"""Defused xml.sax.expatreader
-"""
-from __future__ import print_function, absolute_import
-
-from xml.sax.expatreader import ExpatParser as _ExpatParser
-
-from .common import DTDForbidden, EntitiesForbidden, ExternalReferenceForbidden
-
-__origin__ = "xml.sax.expatreader"
-
-
-class DefusedExpatParser(_ExpatParser):
- """Defused SAX driver for the pyexpat C module."""
-
- def __init__(
- self,
- namespaceHandling=0,
- bufsize=2 ** 16 - 20,
- forbid_dtd=False,
- forbid_entities=True,
- forbid_external=True,
- ):
- _ExpatParser.__init__(self, namespaceHandling, bufsize)
- self.forbid_dtd = forbid_dtd
- self.forbid_entities = forbid_entities
- self.forbid_external = forbid_external
-
- def defused_start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
- raise DTDForbidden(name, sysid, pubid)
-
- def defused_entity_decl(
- self, name, is_parameter_entity, value, base, sysid, pubid, notation_name
- ):
- raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
-
- def defused_unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
- # expat 1.2
- raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) # pragma: no cover
-
- def defused_external_entity_ref_handler(self, context, base, sysid, pubid):
- raise ExternalReferenceForbidden(context, base, sysid, pubid)
-
- def reset(self):
- _ExpatParser.reset(self)
- parser = self._parser
- if self.forbid_dtd:
- parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl
- if self.forbid_entities:
- parser.EntityDeclHandler = self.defused_entity_decl
- parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl
- if self.forbid_external:
- parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler
-
-
-def create_parser(*args, **kwargs):
- return DefusedExpatParser(*args, **kwargs)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/lxml.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/lxml.py
deleted file mode 100644
index 99d5be93..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/lxml.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# defusedxml
-#
-# Copyright (c) 2013 by Christian Heimes
-# Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
-"""DEPRECATED Example code for lxml.etree protection
-
-The code has NO protection against decompression bombs.
-"""
-from __future__ import print_function, absolute_import
-
-import threading
-import warnings
-
-from lxml import etree as _etree
-
-from .common import DTDForbidden, EntitiesForbidden, NotSupportedError
-
-LXML3 = _etree.LXML_VERSION[0] >= 3
-
-__origin__ = "lxml.etree"
-
-tostring = _etree.tostring
-
-
-warnings.warn(
- "defusedxml.lxml is no longer supported and will be removed in a future release.",
- category=DeprecationWarning,
- stacklevel=2,
-)
-
-
-class RestrictedElement(_etree.ElementBase):
- """A restricted Element class that filters out instances of some classes"""
-
- __slots__ = ()
- # blacklist = (etree._Entity, etree._ProcessingInstruction, etree._Comment)
- blacklist = _etree._Entity
-
- def _filter(self, iterator):
- blacklist = self.blacklist
- for child in iterator:
- if isinstance(child, blacklist):
- continue
- yield child
-
- def __iter__(self):
- iterator = super(RestrictedElement, self).__iter__()
- return self._filter(iterator)
-
- def iterchildren(self, tag=None, reversed=False):
- iterator = super(RestrictedElement, self).iterchildren(tag=tag, reversed=reversed)
- return self._filter(iterator)
-
- def iter(self, tag=None, *tags):
- iterator = super(RestrictedElement, self).iter(tag=tag, *tags)
- return self._filter(iterator)
-
- def iterdescendants(self, tag=None, *tags):
- iterator = super(RestrictedElement, self).iterdescendants(tag=tag, *tags)
- return self._filter(iterator)
-
- def itersiblings(self, tag=None, preceding=False):
- iterator = super(RestrictedElement, self).itersiblings(tag=tag, preceding=preceding)
- return self._filter(iterator)
-
- def getchildren(self):
- iterator = super(RestrictedElement, self).__iter__()
- return list(self._filter(iterator))
-
- def getiterator(self, tag=None):
- iterator = super(RestrictedElement, self).getiterator(tag)
- return self._filter(iterator)
-
-
-class GlobalParserTLS(threading.local):
- """Thread local context for custom parser instances"""
-
- parser_config = {
- "resolve_entities": False,
- # 'remove_comments': True,
- # 'remove_pis': True,
- }
-
- element_class = RestrictedElement
-
- def createDefaultParser(self):
- parser = _etree.XMLParser(**self.parser_config)
- element_class = self.element_class
- if self.element_class is not None:
- lookup = _etree.ElementDefaultClassLookup(element=element_class)
- parser.set_element_class_lookup(lookup)
- return parser
-
- def setDefaultParser(self, parser):
- self._default_parser = parser
-
- def getDefaultParser(self):
- parser = getattr(self, "_default_parser", None)
- if parser is None:
- parser = self.createDefaultParser()
- self.setDefaultParser(parser)
- return parser
-
-
-_parser_tls = GlobalParserTLS()
-getDefaultParser = _parser_tls.getDefaultParser
-
-
-def check_docinfo(elementtree, forbid_dtd=False, forbid_entities=True):
- """Check docinfo of an element tree for DTD and entity declarations
-
- The check for entity declarations needs lxml 3 or newer. lxml 2.x does
- not support dtd.iterentities().
- """
- docinfo = elementtree.docinfo
- if docinfo.doctype:
- if forbid_dtd:
- raise DTDForbidden(docinfo.doctype, docinfo.system_url, docinfo.public_id)
- if forbid_entities and not LXML3:
- # lxml < 3 has no iterentities()
- raise NotSupportedError("Unable to check for entity declarations " "in lxml 2.x")
-
- if forbid_entities:
- for dtd in docinfo.internalDTD, docinfo.externalDTD:
- if dtd is None:
- continue
- for entity in dtd.iterentities():
- raise EntitiesForbidden(entity.name, entity.content, None, None, None, None)
-
-
-def parse(source, parser=None, base_url=None, forbid_dtd=False, forbid_entities=True):
- if parser is None:
- parser = getDefaultParser()
- elementtree = _etree.parse(source, parser, base_url=base_url)
- check_docinfo(elementtree, forbid_dtd, forbid_entities)
- return elementtree
-
-
-def fromstring(text, parser=None, base_url=None, forbid_dtd=False, forbid_entities=True):
- if parser is None:
- parser = getDefaultParser()
- rootelement = _etree.fromstring(text, parser, base_url=base_url)
- elementtree = rootelement.getroottree()
- check_docinfo(elementtree, forbid_dtd, forbid_entities)
- return rootelement
-
-
-XML = fromstring
-
-
-def iterparse(*args, **kwargs):
- raise NotSupportedError("defused lxml.etree.iterparse not available")
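A sketch of the deprecated lxml shim being removed here; it warns on import by design, and check_docinfo() rejects inline entity declarations after parsing (illustrative, assumes lxml 3+ is installed):

    import warnings
    from defusedxml import EntitiesForbidden

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        from defusedxml.lxml import fromstring

    try:
        fromstring('<!DOCTYPE r [<!ENTITY x "y">]><r>&x;</r>')
    except EntitiesForbidden as exc:
        print("blocked:", exc)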
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/minidom.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/minidom.py
deleted file mode 100644
index 78033b6c..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/minidom.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# defusedxml
-#
-# Copyright (c) 2013 by Christian Heimes
-# Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
-"""Defused xml.dom.minidom
-"""
-from __future__ import print_function, absolute_import
-
-from xml.dom.minidom import _do_pulldom_parse
-from . import expatbuilder as _expatbuilder
-from . import pulldom as _pulldom
-
-__origin__ = "xml.dom.minidom"
-
-
-def parse(
- file, parser=None, bufsize=None, forbid_dtd=False, forbid_entities=True, forbid_external=True
-):
- """Parse a file into a DOM by filename or file object."""
- if parser is None and not bufsize:
- return _expatbuilder.parse(
- file,
- forbid_dtd=forbid_dtd,
- forbid_entities=forbid_entities,
- forbid_external=forbid_external,
- )
- else:
- return _do_pulldom_parse(
- _pulldom.parse,
- (file,),
- {
- "parser": parser,
- "bufsize": bufsize,
- "forbid_dtd": forbid_dtd,
- "forbid_entities": forbid_entities,
- "forbid_external": forbid_external,
- },
- )
-
-
-def parseString(
- string, parser=None, forbid_dtd=False, forbid_entities=True, forbid_external=True
-):
- """Parse a file into a DOM from a string."""
- if parser is None:
- return _expatbuilder.parseString(
- string,
- forbid_dtd=forbid_dtd,
- forbid_entities=forbid_entities,
- forbid_external=forbid_external,
- )
- else:
- return _do_pulldom_parse(
- _pulldom.parseString,
- (string,),
- {
- "parser": parser,
- "forbid_dtd": forbid_dtd,
- "forbid_entities": forbid_entities,
- "forbid_external": forbid_external,
- },
- )
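The deleted minidom wrapper keeps the xml.dom.minidom call shape; a minimal sketch (illustrative only, not part of the patch):

    from defusedxml import DefusedXmlException
    from defusedxml.minidom import parseString

    doc = parseString("<greeting>hi</greeting>")
    print(doc.firstChild.toxml())  # -> <greeting>hi</greeting>

    # the external entity is rejected before any file could be read
    xxe = '<!DOCTYPE r [<!ENTITY e SYSTEM "file:///etc/passwd">]><r>&e;</r>'
    try:
        parseString(xxe)
    except DefusedXmlException as exc:
        print("blocked:", type(exc).__name__)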
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/pulldom.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/pulldom.py
deleted file mode 100644
index e3b10a46..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/pulldom.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# defusedxml
-#
-# Copyright (c) 2013 by Christian Heimes
-# Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
-"""Defused xml.dom.pulldom
-"""
-from __future__ import print_function, absolute_import
-
-from xml.dom.pulldom import parse as _parse
-from xml.dom.pulldom import parseString as _parseString
-from .sax import make_parser
-
-__origin__ = "xml.dom.pulldom"
-
-
-def parse(
- stream_or_string,
- parser=None,
- bufsize=None,
- forbid_dtd=False,
- forbid_entities=True,
- forbid_external=True,
-):
- if parser is None:
- parser = make_parser()
- parser.forbid_dtd = forbid_dtd
- parser.forbid_entities = forbid_entities
- parser.forbid_external = forbid_external
- return _parse(stream_or_string, parser, bufsize)
-
-
-def parseString(
- string, parser=None, forbid_dtd=False, forbid_entities=True, forbid_external=True
-):
- if parser is None:
- parser = make_parser()
- parser.forbid_dtd = forbid_dtd
- parser.forbid_entities = forbid_entities
- parser.forbid_external = forbid_external
- return _parseString(string, parser)
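The pulldom wrapper above only swaps in the defused SAX reader; the event loop itself is the stock one (illustrative sketch):

    from xml.dom.pulldom import START_ELEMENT
    from defusedxml.pulldom import parseString

    for event, node in parseString("<a><b/><b/></a>"):
        if event == START_ELEMENT:
            print(node.tagName)  # -> a, b, b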
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/sax.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/sax.py
deleted file mode 100644
index b2786f74..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/sax.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# defusedxml
-#
-# Copyright (c) 2013 by Christian Heimes
-# Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
-"""Defused xml.sax
-"""
-from __future__ import print_function, absolute_import
-
-from xml.sax import InputSource as _InputSource
-from xml.sax import ErrorHandler as _ErrorHandler
-
-from . import expatreader
-
-__origin__ = "xml.sax"
-
-
-def parse(
- source,
- handler,
- errorHandler=_ErrorHandler(),
- forbid_dtd=False,
- forbid_entities=True,
- forbid_external=True,
-):
- parser = make_parser()
- parser.setContentHandler(handler)
- parser.setErrorHandler(errorHandler)
- parser.forbid_dtd = forbid_dtd
- parser.forbid_entities = forbid_entities
- parser.forbid_external = forbid_external
- parser.parse(source)
-
-
-def parseString(
- string,
- handler,
- errorHandler=_ErrorHandler(),
- forbid_dtd=False,
- forbid_entities=True,
- forbid_external=True,
-):
- from io import BytesIO
-
- if errorHandler is None:
- errorHandler = _ErrorHandler()
- parser = make_parser()
- parser.setContentHandler(handler)
- parser.setErrorHandler(errorHandler)
- parser.forbid_dtd = forbid_dtd
- parser.forbid_entities = forbid_entities
- parser.forbid_external = forbid_external
-
- inpsrc = _InputSource()
- inpsrc.setByteStream(BytesIO(string))
- parser.parse(inpsrc)
-
-
-def make_parser(parser_list=[]):
- return expatreader.create_parser()
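A sketch of the deleted sax wrapper in use; note parseString() expects bytes because it wraps the payload in a BytesIO, as the hunk above shows (TagCollector is a made-up handler for illustration):

    from xml.sax.handler import ContentHandler
    from defusedxml.sax import parseString

    class TagCollector(ContentHandler):
        def __init__(self):
            super().__init__()
            self.tags = []

        def startElement(self, name, attrs):
            self.tags.append(name)

    handler = TagCollector()
    parseString(b"<a><b/></a>", handler)
    print(handler.tags)  # -> ['a', 'b']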
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/xmlrpc.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/xmlrpc.py
deleted file mode 100644
index fbc674da..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/defusedxml/xmlrpc.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# defusedxml
-#
-# Copyright (c) 2013 by Christian Heimes
-# Licensed to PSF under a Contributor Agreement.
-# See https://www.python.org/psf/license for licensing details.
-"""Defused xmlrpclib
-
-Also defuses gzip bomb
-"""
-from __future__ import print_function, absolute_import
-
-import io
-
-from .common import DTDForbidden, EntitiesForbidden, ExternalReferenceForbidden, PY3
-
-if PY3:
- __origin__ = "xmlrpc.client"
- from xmlrpc.client import ExpatParser
- from xmlrpc import client as xmlrpc_client
- from xmlrpc import server as xmlrpc_server
- from xmlrpc.client import gzip_decode as _orig_gzip_decode
- from xmlrpc.client import GzipDecodedResponse as _OrigGzipDecodedResponse
-else:
- __origin__ = "xmlrpclib"
- from xmlrpclib import ExpatParser
- import xmlrpclib as xmlrpc_client
-
- xmlrpc_server = None
- from xmlrpclib import gzip_decode as _orig_gzip_decode
- from xmlrpclib import GzipDecodedResponse as _OrigGzipDecodedResponse
-
-try:
- import gzip
-except ImportError: # pragma: no cover
- gzip = None
-
-
-# Limit maximum request size to prevent resource exhaustion DoS
-# Also used to limit maximum amount of gzip decoded data in order to prevent
-# decompression bombs
-# A value of -1 or smaller disables the limit
-MAX_DATA = 30 * 1024 * 1024 # 30 MB
-
-
-def defused_gzip_decode(data, limit=None):
- """gzip encoded data -> unencoded data
-
- Decode data using the gzip content encoding as described in RFC 1952
- """
- if not gzip: # pragma: no cover
- raise NotImplementedError
- if limit is None:
- limit = MAX_DATA
- f = io.BytesIO(data)
- gzf = gzip.GzipFile(mode="rb", fileobj=f)
- try:
- if limit < 0: # no limit
- decoded = gzf.read()
- else:
- decoded = gzf.read(limit + 1)
- except IOError: # pragma: no cover
- raise ValueError("invalid data")
- f.close()
- gzf.close()
- if limit >= 0 and len(decoded) > limit:
- raise ValueError("max gzipped payload length exceeded")
- return decoded
-
-
-class DefusedGzipDecodedResponse(gzip.GzipFile if gzip else object):
- """a file-like object to decode a response encoded with the gzip
- method, as described in RFC 1952.
- """
-
- def __init__(self, response, limit=None):
- # response doesn't support tell() and read(), required by
- # GzipFile
- if not gzip: # pragma: no cover
- raise NotImplementedError
- self.limit = limit = limit if limit is not None else MAX_DATA
- if limit < 0: # no limit
- data = response.read()
- self.readlength = None
- else:
- data = response.read(limit + 1)
- self.readlength = 0
- if limit >= 0 and len(data) > limit:
- raise ValueError("max payload length exceeded")
- self.stringio = io.BytesIO(data)
- gzip.GzipFile.__init__(self, mode="rb", fileobj=self.stringio)
-
- def read(self, n):
- if self.limit >= 0:
- left = self.limit - self.readlength
- n = min(n, left + 1)
- data = gzip.GzipFile.read(self, n)
- self.readlength += len(data)
- if self.readlength > self.limit:
- raise ValueError("max payload length exceeded")
- return data
- else:
- return gzip.GzipFile.read(self, n)
-
- def close(self):
- gzip.GzipFile.close(self)
- self.stringio.close()
-
-
-class DefusedExpatParser(ExpatParser):
- def __init__(self, target, forbid_dtd=False, forbid_entities=True, forbid_external=True):
- ExpatParser.__init__(self, target)
- self.forbid_dtd = forbid_dtd
- self.forbid_entities = forbid_entities
- self.forbid_external = forbid_external
- parser = self._parser
- if self.forbid_dtd:
- parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl
- if self.forbid_entities:
- parser.EntityDeclHandler = self.defused_entity_decl
- parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl
- if self.forbid_external:
- parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler
-
- def defused_start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
- raise DTDForbidden(name, sysid, pubid)
-
- def defused_entity_decl(
- self, name, is_parameter_entity, value, base, sysid, pubid, notation_name
- ):
- raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
-
- def defused_unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
- # expat 1.2
- raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) # pragma: no cover
-
- def defused_external_entity_ref_handler(self, context, base, sysid, pubid):
- raise ExternalReferenceForbidden(context, base, sysid, pubid)
-
-
-def monkey_patch():
- xmlrpc_client.FastParser = DefusedExpatParser
- xmlrpc_client.GzipDecodedResponse = DefusedGzipDecodedResponse
- xmlrpc_client.gzip_decode = defused_gzip_decode
- if xmlrpc_server:
- xmlrpc_server.gzip_decode = defused_gzip_decode
-
-
-def unmonkey_patch():
- xmlrpc_client.FastParser = None
- xmlrpc_client.GzipDecodedResponse = _OrigGzipDecodedResponse
- xmlrpc_client.gzip_decode = _orig_gzip_decode
- if xmlrpc_server:
- xmlrpc_server.gzip_decode = _orig_gzip_decode
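The xmlrpc module being dropped here patches the stdlib in place rather than wrapping it; a minimal sketch (the endpoint URL is a hypothetical placeholder):

    import defusedxml.xmlrpc
    import xmlrpc.client

    defusedxml.xmlrpc.monkey_patch()  # installs DefusedExpatParser and the gzip caps
    try:
        # proxies created from here on parse responses defensively
        proxy = xmlrpc.client.ServerProxy("http://localhost:8000/RPC2")
    finally:
        defusedxml.xmlrpc.unmonkey_patch()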
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/distutils-precedence.pth b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/distutils-precedence.pth
deleted file mode 100644
index 7f009fe9..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/distutils-precedence.pth
+++ /dev/null
@@ -1 +0,0 @@
-import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'local') == 'local'; enabled and __import__('_distutils_hack').add_shim();
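The one-liner above is dense; unrolled for reading, it is equivalent to the following (site.py executes .pth lines that begin with "import" at interpreter startup):

    import os

    var = 'SETUPTOOLS_USE_DISTUTILS'
    enabled = os.environ.get(var, 'local') == 'local'
    if enabled:
        __import__('_distutils_hack').add_shim()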
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/LICENSE.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/LICENSE.txt
deleted file mode 100644
index 4c904dba..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2013-2019 Python Charmers Pty Ltd, Australia
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/METADATA
deleted file mode 100644
index b6f83573..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/METADATA
+++ /dev/null
@@ -1,110 +0,0 @@
-Metadata-Version: 2.1
-Name: future
-Version: 0.18.2
-Summary: Clean single-source support for Python 3 and 2
-Home-page: https://python-future.org
-Author: Ed Schofield
-Author-email: ed@pythoncharmers.com
-License: MIT
-Keywords: future past python3 migration futurize backport six 2to3 modernize pasteurize 3to2
-Platform: UNKNOWN
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: License :: OSI Approved
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Development Status :: 4 - Beta
-Classifier: Intended Audience :: Developers
-Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*
-License-File: LICENSE.txt
-
-
-future: Easy, safe support for Python 2/3 compatibility
-=======================================================
-
-``future`` is the missing compatibility layer between Python 2 and Python
-3. It allows you to use a single, clean Python 3.x-compatible codebase to
-support both Python 2 and Python 3 with minimal overhead.
-
-It is designed to be used as follows::
-
- from __future__ import (absolute_import, division,
- print_function, unicode_literals)
- from builtins import (
- bytes, dict, int, list, object, range, str,
- ascii, chr, hex, input, next, oct, open,
- pow, round, super,
- filter, map, zip)
-
-followed by predominantly standard, idiomatic Python 3 code that then runs
-similarly on Python 2.6/2.7 and Python 3.3+.
-
-The imports have no effect on Python 3. On Python 2, they shadow the
-corresponding builtins, which normally have different semantics on Python 3
-versus 2, to provide their Python 3 semantics.
-
-
-Standard library reorganization
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-``future`` supports the standard library reorganization (PEP 3108) through the
-following Py3 interfaces:
-
- >>> # Top-level packages with Py3 names provided on Py2:
- >>> import html.parser
- >>> import queue
- >>> import tkinter.dialog
- >>> import xmlrpc.client
- >>> # etc.
-
- >>> # Aliases provided for extensions to existing Py2 module names:
- >>> from future.standard_library import install_aliases
- >>> install_aliases()
-
- >>> from collections import Counter, OrderedDict # backported to Py2.6
- >>> from collections import UserDict, UserList, UserString
- >>> import urllib.request
- >>> from itertools import filterfalse, zip_longest
- >>> from subprocess import getoutput, getstatusoutput
-
-
-Automatic conversion
---------------------
-
-An included script called `futurize
-<http://python-future.org/automatic_conversion.html>`_ aids in converting
-code (from either Python 2 or Python 3) to code compatible with both
-platforms. It is similar to ``python-modernize`` but goes further in
-providing Python 3 compatibility through the use of the backported types
-and builtin functions in ``future``.
-
-
-Documentation
--------------
-
-See: http://python-future.org
-
-
-Credits
--------
-
-:Author: Ed Schofield, Jordan M. Adler, et al
-:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte
- Ltd, Singapore. http://pythoncharmers.com
-:Others: See docs/credits.rst or http://python-future.org/credits.html
-
-
-Licensing
----------
-Copyright 2013-2019 Python Charmers Pty Ltd, Australia.
-The software is distributed under an MIT licence. See LICENSE.txt.
-
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/RECORD
deleted file mode 100644
index 09cd9f3b..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/RECORD
+++ /dev/null
@@ -1,219 +0,0 @@
-../../bin/futurize,sha256=sXCNiOHh_TxI0O2jpXervN8SaTGHsk3s7DWZVaVd7ps,215
-../../bin/pasteurize,sha256=Z6X36i6UI3tjKEgkWJ-dYlE1hFprHZZaMzOq0Pr6yu8,217
-future-0.18.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-future-0.18.2.dist-info/LICENSE.txt,sha256=kW5WE5LUhHG5wjQ39W4mUvMgyzsRnOqhYu30EBb3Rrk,1083
-future-0.18.2.dist-info/METADATA,sha256=Xjjk3ziBhbMk6Wv0UPOWwVUsKGWBitr7WJrud7vWKss,3729
-future-0.18.2.dist-info/RECORD,,
-future-0.18.2.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-future-0.18.2.dist-info/entry_points.txt,sha256=-ATQtLUC2gkzrCYqc1Twac093xrI164NuMwsRALJWnM,89
-future-0.18.2.dist-info/top_level.txt,sha256=DT0C3az2gb-uJaj-fs0h4WwHYlJVDp0EvLdud1y5Zyw,38
-future/__init__.py,sha256=TsDq1XoGk6Jfach_rEhwAi07zR5OKYZ6hhUlG5Bj6Ag,2991
-future/backports/__init__.py,sha256=5QXvQ_jc5Xx6p4dSaHnZXPZazBEunKDKhbUjxZ0XD1I,530
-future/backports/_markupbase.py,sha256=MDPTCykLq4J7Aea3PvYotATEE0CG4R_SjlxfJaLXTJM,16215
-future/backports/datetime.py,sha256=I214Vu0cRY8mi8J5aIcsAyQJnWmOKXeLV-QTWSn7VQU,75552
-future/backports/email/__init__.py,sha256=eH3AJr3FkuBy_D6yS1V2K76Q2CQ93y2zmAMWmn8FbHI,2269
-future/backports/email/_encoded_words.py,sha256=m1vTRfxAQdg4VyWO7PF-1ih1mmq97V-BPyHHkuEwSME,8443
-future/backports/email/_header_value_parser.py,sha256=cj_1ce1voLn8H98r9cKqiSLgfFSxCv3_UL3sSvjqgjk,104692
-future/backports/email/_parseaddr.py,sha256=KewEnos0YDM-SYX503z7E1MmVbG5VRaKjxjcl0Ipjbs,17389
-future/backports/email/_policybase.py,sha256=2lJD9xouiz4uHvWGQ6j1nwlwWVQGwwzpy5JZoeQqhUc,14647
-future/backports/email/base64mime.py,sha256=sey6iJA9pHIOdFgoV1p7QAwYVjt8CEkDhITt304-nyI,3729
-future/backports/email/charset.py,sha256=CfE4iV2zAq6MQC0CHXHLnwTNW71zmhNITbzOcfxE4vY,17439
-future/backports/email/encoders.py,sha256=Nn4Pcx1rOdRgoSIzB6T5RWHl5zxClbf32wgE6D0tUt8,2800
-future/backports/email/errors.py,sha256=tRX8PP5g7mk2bAxL1jTCYrbfhD2gPZFNrh4_GJRM8OQ,3680
-future/backports/email/feedparser.py,sha256=bvmhb4cdY-ipextPK2K2sDgMsNvTspmuQfYyCxc4zSc,22736
-future/backports/email/generator.py,sha256=lpaLhZHneguvZ2QgRu7Figkjb7zmY28AGhj9iZTdI7s,19520
-future/backports/email/header.py,sha256=uBHbNKO-yx5I9KBflernJpyy3fX4gImCB1QE7ICApLs,24448
-future/backports/email/headerregistry.py,sha256=ZPbvLKXD0NMLSU4jXlVHfGyGcLMrFm-GQVURu_XHj88,20637
-future/backports/email/iterators.py,sha256=kMRYFGy3SVVpo7HG7JJr2ZAlOoaX6CVPzKYwDSvLfV0,2348
-future/backports/email/message.py,sha256=I6WW5cZDza7uwLOGJSvsDhGZC9K_Q570Lk2gt_vDUXM,35237
-future/backports/email/mime/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-future/backports/email/mime/application.py,sha256=m-5a4mSxu2E32XAImnp9x9eMVX5Vme2iNgn2dMMNyss,1401
-future/backports/email/mime/audio.py,sha256=2ognalFRadcsUYQYMUZbjv5i1xJbFhQN643doMuI7M4,2815
-future/backports/email/mime/base.py,sha256=wV3ClQyMsOqmkXSXbk_wd_zPoPTvBx8kAIzq3rdM4lE,875
-future/backports/email/mime/image.py,sha256=DpQk1sB-IMmO43AF4uadsXyf_y5TdEzJLfyhqR48bIw,1907
-future/backports/email/mime/message.py,sha256=pFsMhXW07aRjsLq1peO847PApWFAl28-Z2Z7BP1Dn74,1429
-future/backports/email/mime/multipart.py,sha256=j4Lf_sJmuwTbfgdQ6R35_t1_ha2DynJBJDvpjwbNObE,1699
-future/backports/email/mime/nonmultipart.py,sha256=Ciba1Z8d2yLDDpxgDJuk3Bb-TqcpE9HCd8KfbW5vgl4,832
-future/backports/email/mime/text.py,sha256=zV98BjoR4S_nX8c47x43LnsnifeGhIfNGwSAh575bs0,1552
-future/backports/email/parser.py,sha256=-115SC3DHZ6lLijWFTxuOnE-GiM2BOYaUSz-QpmvYSo,5312
-future/backports/email/policy.py,sha256=gpcbhVRXuCohkK6MUqopTs1lv4E4-ZVUO6OVncoGEJE,8823
-future/backports/email/quoprimime.py,sha256=w93W5XgdFpyGaDqDBJrnXF_v_npH5r20WuAxmrAzyQg,10923
-future/backports/email/utils.py,sha256=vpfN0E8UjNbNw-2NFBQGCo4TNgrghMsqzpEYW5C_fBs,14270
-future/backports/html/__init__.py,sha256=FKwqFtWMCoGNkhU97OPnR1fZSh6etAKfN1FU1KvXcV8,924
-future/backports/html/entities.py,sha256=kzoRnQyGk_3DgoucHLhL5QL1pglK9nvmxhPIGZFDTnc,75428
-future/backports/html/parser.py,sha256=G2tUObvbHSotNt06JLY-BP1swaZNfDYFd_ENWDjPmRg,19770
-future/backports/http/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-future/backports/http/client.py,sha256=76EbhEZOtvdHFcU-jrjivoff13oQ9IMbdkZEdf5kQzQ,47602
-future/backports/http/cookiejar.py,sha256=_Vy4BPT-h0ZT0R_utGQAFXzuOAdmU9KedGFffyX9wN4,76559
-future/backports/http/cookies.py,sha256=DsyDUGDEbCXAA9Jq6suswSc76uSZqUu39adDDNj8XGw,21581
-future/backports/http/server.py,sha256=1CaMxgzHf9lYhmTJyE7topgjRIlIn9cnjgw8YEvwJV4,45523
-future/backports/misc.py,sha256=AkbED6BdHKnYCmIAontT4zHKTqdPPfJfn35HIs6LDrg,32682
-future/backports/socket.py,sha256=DH1V6IjKPpJ0tln8bYvxvQ7qnvZG-UoQtMA5yVleHiU,15663
-future/backports/socketserver.py,sha256=Twvyk5FqVnOeiNcbVsyMDPTF1mNlkKfyofG7tKxTdD8,24286
-future/backports/test/__init__.py,sha256=9dXxIZnkI095YfHC-XIaVF6d31GjeY1Ag8TEzcFgepM,264
-future/backports/test/badcert.pem,sha256=JioQeRZkHH8hGsWJjAF3U1zQvcWqhyzG6IOEJpTY9SE,1928
-future/backports/test/badkey.pem,sha256=gaBK9px_gG7DmrLKxfD6f6i-toAmARBTVfs-YGFRQF0,2162
-future/backports/test/dh512.pem,sha256=dUTsjtLbK-femrorUrTGF8qvLjhTiT_n4Uo5V6u__Gs,402
-future/backports/test/https_svn_python_org_root.pem,sha256=wOB3Onnc62Iu9kEFd8GcHhd_suucYjpJNA3jyfHeJWA,2569
-future/backports/test/keycert.passwd.pem,sha256=ZBfnVLpbBtAOf_2gCdiQ-yrBHmRsNzSf8VC3UpQZIjg,1830
-future/backports/test/keycert.pem,sha256=xPXi5idPcQVbrhgxBqF2TNGm6sSZ2aLVVEt6DWzplL8,1783
-future/backports/test/keycert2.pem,sha256=DB46FEAYv8BWwQJ-5RzC696FxPN7CON-Qsi-R4poJgc,1795
-future/backports/test/nokia.pem,sha256=s00x0uPDSaa5DHJ_CwzlVhg3OVdJ47f4zgqQdd0SAfQ,1923
-future/backports/test/nullbytecert.pem,sha256=NFRYWhmP_qT3jGfVjR6-iaC-EQdhIFjiXtTLN5ZPKnE,5435
-future/backports/test/nullcert.pem,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-future/backports/test/pystone.py,sha256=fvyoJ_tVovTNaxbJmdJMwr9F6SngY-U4ibULnd_wUqA,7427
-future/backports/test/sha256.pem,sha256=3wB-GQqEc7jq-PYwYAQaPbtTvvr7stk_DVmZxFgehfA,8344
-future/backports/test/ssl_cert.pem,sha256=M607jJNeIeHG9BlTf_jaQkPJI4nOxSJPn-zmEAaW43M,867
-future/backports/test/ssl_key.passwd.pem,sha256=I_WH4sBw9Vs9Z-BvmuXY0aw8tx8avv6rm5UL4S_pP00,963
-future/backports/test/ssl_key.pem,sha256=VKGU-R3UYaZpVTXl7chWl4vEYEDeob69SfvRTQ8aq_4,916
-future/backports/test/ssl_servers.py,sha256=-pd7HMZljuZfFRAbCAiAP_2G04orITJFj-S9ddr6o84,7209
-future/backports/test/support.py,sha256=zJrb-pz-Wu2dZwnNodg1v3w96zVq7ORuN-hOGOHbdA8,70881
-future/backports/total_ordering.py,sha256=O3M57_IisQ-zW5hW20uxkfk4fTGsr0EF2tAKx3BksQo,1929
-future/backports/urllib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-future/backports/urllib/error.py,sha256=ktikuK9ag4lS4f8Z0k5p1F11qF40N2AiOtjbXiF97ew,2715
-future/backports/urllib/parse.py,sha256=67avrYqV1UK7i_22goRUrvJ8SffzjRdTja9wzq_ynXY,35792
-future/backports/urllib/request.py,sha256=aR9ZMzfhV1C2Qk3wFsGvkwxqtdPTdsJVGRt5DUCwgJ8,96276
-future/backports/urllib/response.py,sha256=ooQyswwbb-9N6IVi1Kwjss1aR-Kvm8ZNezoyVEonp8c,3180
-future/backports/urllib/robotparser.py,sha256=pnAGTbKhdbCq_9yMZp7m8hj5q_NJpyQX6oQIZuYcnkw,6865
-future/backports/xmlrpc/__init__.py,sha256=h61ciVTdVvu8oEUXv4dHf_Tc5XUXDH3RKB1-8fQhSsg,38
-future/backports/xmlrpc/client.py,sha256=6a6Pvx_RVC9gIHDkFOVdREeGaZckOOiWd7T6GyzU3qU,48133
-future/backports/xmlrpc/server.py,sha256=W_RW5hgYbNV2LGbnvngzm7akacRdK-XFY-Cy2HL-qsY,37285
-future/builtins/__init__.py,sha256=jSdOucWfCsfkfTR8Jd4-Ls-YQpJ0AnzUomBxgwuoxNs,1687
-future/builtins/disabled.py,sha256=Ysq74bsmwntpq7dzkwTAD7IHKrkXy66vJlPshVwgVBI,2109
-future/builtins/iterators.py,sha256=l1Zawm2x82oqOuGGtCZRE76Ej98sMlHQwu9fZLK5RrA,1396
-future/builtins/misc.py,sha256=hctlKKWUyN0Eoodxg4ySQHEqARTukOLR4L5K5c6PW9k,4550
-future/builtins/new_min_max.py,sha256=7qQ4iiG4GDgRzjPzzzmg9pdby35Mtt6xNOOsyqHnIGY,1757
-future/builtins/newnext.py,sha256=oxXB8baXqJv29YG40aCS9UXk9zObyoOjya8BJ7NdBJM,2009
-future/builtins/newround.py,sha256=l2EXPAFU3fAsZigJxUH6x66B7jhNaB076-L5FR617R8,3181
-future/builtins/newsuper.py,sha256=LmiUQ_f6NXDIz6v6sDPkoTWl-2Zccy7PpZfQKYtscac,4146
-future/moves/__init__.py,sha256=MsAW69Xp_fqUo4xODufcKM6AZf-ozHaz44WPZdsDFJA,220
-future/moves/_dummy_thread.py,sha256=c8ZRUd8ffvyvGKGGgve5NKc8VdtAWquu8-4FnO2EdvA,175
-future/moves/_markupbase.py,sha256=W9wh_Gu3jDAMIhVBV1ZnCkJwYLHRk_v_su_HLALBkZQ,171
-future/moves/_thread.py,sha256=rwY7L4BZMFPlrp_i6T2Un4_iKYwnrXJ-yV6FJZN8YDo,163
-future/moves/builtins.py,sha256=4sjjKiylecJeL9da_RaBZjdymX2jtMs84oA9lCqb4Ug,281
-future/moves/collections.py,sha256=OKQ-TfUgms_2bnZRn4hrclLDoiN2i-HSWcjs3BC2iY8,417
-future/moves/configparser.py,sha256=TNy226uCbljjU-DjAVo7j7Effbj5zxXvDh0SdXehbzk,146
-future/moves/copyreg.py,sha256=Y3UjLXIMSOxZggXtvZucE9yv4tkKZtVan45z8eix4sU,438
-future/moves/dbm/__init__.py,sha256=_VkvQHC2UcIgZFPRroiX_P0Fs7HNqS_69flR0-oq2B8,488
-future/moves/dbm/dumb.py,sha256=HKdjjtO3EyP9EKi1Hgxh_eUU6yCQ0fBX9NN3n-zb8JE,166
-future/moves/dbm/gnu.py,sha256=XoCSEpZ2QaOgo2h1m80GW7NUgj_b93BKtbcuwgtnaKo,162
-future/moves/dbm/ndbm.py,sha256=OFnreyo_1YHDBl5YUm9gCzKlN1MHgWbfSQAZVls2jaM,162
-future/moves/html/__init__.py,sha256=BSUFSHxXf2kGvHozlnrB1nn6bPE6p4PpN3DwA_Z5geo,1016
-future/moves/html/entities.py,sha256=lVvchdjK_RzRj759eg4RMvGWHfgBbj0tKGOoZ8dbRyY,177
-future/moves/html/parser.py,sha256=V2XpHLKLCxQum3N9xlO3IUccAD7BIykZMqdEcWET3vY,167
-future/moves/http/__init__.py,sha256=Mx1v_Tcks4udHCtDM8q2xnYUiQ01gD7EpPyeQwsP3-Q,71
-future/moves/http/client.py,sha256=hqEBq7GDXZidd1AscKnSyjSoMcuj8rERqGTmD7VheDQ,165
-future/moves/http/cookiejar.py,sha256=Frr9ZZCg-145ymy0VGpiPJhvBEpJtVqRBYPaKhgT1Z4,173
-future/moves/http/cookies.py,sha256=PPrHa1_oDbu3D_BhJGc6PvMgY1KoxyYq1jqeJwEcMvE,233
-future/moves/http/server.py,sha256=8YQlSCShjAsB5rr5foVvZgp3IzwYFvTmGZCHhBSDtaI,606
-future/moves/itertools.py,sha256=PVxFHRlBQl9ElS0cuGFPcUtj53eHX7Z1DmggzGfgQ6c,158
-future/moves/pickle.py,sha256=r8j9skzfE8ZCeHyh_OB-WucOkRTIHN7zpRM7l7V3qS4,229
-future/moves/queue.py,sha256=uxvLCChF-zxWWgrY1a_wxt8rp2jILdwO4PrnkBW6VTE,160
-future/moves/reprlib.py,sha256=Nt5sUgMQ3jeVIukqSHOvB0UIsl6Y5t-mmT_13mpZmiY,161
-future/moves/socketserver.py,sha256=v8ZLurDxHOgsubYm1iefjlpnnJQcx2VuRUGt9FCJB9k,174
-future/moves/subprocess.py,sha256=oqRSMfFZkxM4MXkt3oD5N6eBwmmJ6rQ9KPhvSQKT_hM,251
-future/moves/sys.py,sha256=HOMRX4Loim75FMbWawd3oEwuGNJR-ClMREEFkVpBsRs,132
-future/moves/test/__init__.py,sha256=yB9F-fDQpzu1v8cBoKgIrL2ScUNqjlkqEztYrGVCQ-0,110
-future/moves/test/support.py,sha256=6zGgTTXcERyBJIQ04-X-sAe781tVgLVHp3HzmQPy52g,259
-future/moves/tkinter/__init__.py,sha256=jV9vDx3wRl0bsoclU8oSe-5SqHQ3YpCbStmqtXnq1p4,620
-future/moves/tkinter/colorchooser.py,sha256=kprlmpRtvDbW5Gq43H1mi2KmNJ2kuzLQOba0a5EwDkU,333
-future/moves/tkinter/commondialog.py,sha256=mdUbq1IZqOGaSA7_8R367IukDCsMfzXiVHrTQQpp7Z0,333
-future/moves/tkinter/constants.py,sha256=0qRUrZLRPdVxueABL9KTzzEWEsk6xM1rOjxK6OHxXtA,324
-future/moves/tkinter/dialog.py,sha256=ksp-zvs-_A90P9RNHS8S27f1k8f48zG2Bel2jwZV5y0,311
-future/moves/tkinter/dnd.py,sha256=C_Ah0Urnyf2XKE5u-oP6mWi16RzMSXgMA1uhBSAwKY8,306
-future/moves/tkinter/filedialog.py,sha256=RSJFDGOP2AJ4T0ZscJ2hyF9ssOWp9t_S_DtnOmT-WZ8,323
-future/moves/tkinter/font.py,sha256=TXarflhJRxqepaRNSDw6JFUVGz5P1T1C4_uF9VRqj3w,309
-future/moves/tkinter/messagebox.py,sha256=WJt4t83kLmr_UnpCWFuLoyazZr3wAUOEl6ADn3osoEA,327
-future/moves/tkinter/scrolledtext.py,sha256=DRzN8aBAlDBUo1B2KDHzdpRSzXBfH4rOOz0iuHXbQcg,329
-future/moves/tkinter/simpledialog.py,sha256=6MhuVhZCJV4XfPpPSUWKlDLLGEi0Y2ZlGQ9TbsmJFL0,329
-future/moves/tkinter/tix.py,sha256=aNeOfbWSGmcN69UmEGf4tJ-QIxLT6SU5ynzm1iWgepA,302
-future/moves/tkinter/ttk.py,sha256=rRrJpDjcP2gjQNukECu4F026P-CkW-3Ca2tN6Oia-Fw,302
-future/moves/urllib/__init__.py,sha256=yB9F-fDQpzu1v8cBoKgIrL2ScUNqjlkqEztYrGVCQ-0,110
-future/moves/urllib/error.py,sha256=gfrKzv-6W5OjzNIfjvJaQkxABRLym2KwjfKFXSdDB60,479
-future/moves/urllib/parse.py,sha256=xLLUMIIB5MreCdYzRZ5zIRWrhTRCoMO8RZEH4WPFQDY,1045
-future/moves/urllib/request.py,sha256=ttIzq60PwjRyrLQUGdAtfYvs4fziVwvcLe2Kw-hvE0g,3496
-future/moves/urllib/response.py,sha256=ZEZML0FpbB--GIeBFPvSzbtlVJ6EsR4tCI4qB7D8sFQ,342
-future/moves/urllib/robotparser.py,sha256=j24p6dMNzUpGZtT3BQxwRoE-F88iWmBpKgu0tRV61FQ,179
-future/moves/winreg.py,sha256=2zNAG59QI7vFlCj7kqDh0JrAYTpexOnI55PEAIjYhqo,163
-future/moves/xmlrpc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-future/moves/xmlrpc/client.py,sha256=2PfnL5IbKVwdKP7C8B1OUviEtuBObwoH4pAPfvHIvQc,143
-future/moves/xmlrpc/server.py,sha256=ESDXdpUgTKyeFmCDSnJmBp8zONjJklsRJOvy4OtaALc,143
-future/standard_library/__init__.py,sha256=7paz9IsD5qv_tvk5Rre3YrlA2_2aS1FJfI7UlrzAtWY,27743
-future/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-future/tests/base.py,sha256=7LTAKHJgUxOwmffD1kgcErVt2VouKcldPnq4iruqk_k,19956
-future/types/__init__.py,sha256=5fBxWqf_OTQ8jZ7k2TS34rFH14togeR488F4zBHIQ-s,6831
-future/types/newbytes.py,sha256=D_kNDD9sbNJir2cUxxePiAuw2OW5irxVnu55uHmuK9E,16303
-future/types/newdict.py,sha256=2N7P44cWmWtiDHvlK5ir15mW492gg6uP2n65d5bsDy4,3100
-future/types/newint.py,sha256=hJiv9qUDrjl1xkfzNFNLzafsRMPoFcRFceoivUzVIek,13286
-future/types/newlist.py,sha256=-H5-fXodd-UQgTFnZBJdwE68CrgIL_jViYdv4w7q2rU,2284
-future/types/newmemoryview.py,sha256=LnARgiKqQ2zLwwDZ3owu1atoonPQkOneWMfxJCwB4_o,712
-future/types/newobject.py,sha256=AX_n8GwlDR2IY-xIwZCvu0Olj_Ca2aS57nlTihnFr-I,3358
-future/types/newopen.py,sha256=lcRNHWZ1UjEn_0_XKis1ZA5U6l-4c-CHlC0WX1sY4NI,810
-future/types/newrange.py,sha256=7sgJaRaC4WIUtZ40K-c1d5QWruyaCWGgTVFadKo8qYA,5294
-future/types/newstr.py,sha256=e0brkurI0IK--4ToQEO4Cz1FECZav4CyUGMKxlrcmK4,15758
-future/utils/__init__.py,sha256=wsvXsKx-DXZichQ10Rdml-CWMqS79RNNynmdvfISpCU,21828
-future/utils/surrogateescape.py,sha256=7u4V4XlW83P5YSAJS2f92YUF8vsWthsiTnmAshOJL_M,6097
-libfuturize/__init__.py,sha256=CZA_KgvTQOPAY1_MrlJeQ6eMh2Eei4_KIv4JuyAkpfw,31
-libfuturize/fixer_util.py,sha256=Zhms5G97l40pyG1krQM2lCp-TxnocBdJkB2AbkAFnKY,17494
-libfuturize/fixes/__init__.py,sha256=5KEpUnjVsFCCsr_-zrikvJbLf9zslEJnFTH_5pBc33I,5236
-libfuturize/fixes/fix_UserDict.py,sha256=jL4jXnGaUQTkG8RKfGXbU_HVTkB3MWZMQwUkqMAjB6I,3840
-libfuturize/fixes/fix_absolute_import.py,sha256=vkrF2FyQR5lSz2WmdqywzkEJVTC0eq4gh_REWBKHh7w,3140
-libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py,sha256=Fr219VAzR8KWXc2_bfiqLl10EgxAWjL6cI3Mowt--VU,662
-libfuturize/fixes/fix_basestring.py,sha256=bHkKuMzhr5FMXwjXlMOjsod4S3rQkVdbzhoWV4-tl3Y,394
-libfuturize/fixes/fix_bytes.py,sha256=AhzOJes6EnPwgzboDjvURANbWKqciG6ZGaYW07PYQK8,685
-libfuturize/fixes/fix_cmp.py,sha256=Blq_Z0IGkYiKS83QzZ5wUgpJyZfQiZoEsWJ1VPyXgFY,701
-libfuturize/fixes/fix_division.py,sha256=gnrAi7stquiVUyi_De1H8q--43iQaSUX0CjnOmQ6O2w,228
-libfuturize/fixes/fix_division_safe.py,sha256=Y_HUfQJAxRClXkcfqWP5SFCsRYZOsLUsNjLXlGOA3cQ,3292
-libfuturize/fixes/fix_execfile.py,sha256=I5AcJ6vPZ7i70TChaq9inxqnZ4C04-yJyfAItGa8E3c,921
-libfuturize/fixes/fix_future_builtins.py,sha256=QBCRpD9XA7tbtfP4wmOF2DXquB4lq-eupkQj-QAxp0s,2027
-libfuturize/fixes/fix_future_standard_library.py,sha256=FVtflFt38efHe_SEX6k3m6IYAtKWjA4rAPZrlCv6yA0,733
-libfuturize/fixes/fix_future_standard_library_urllib.py,sha256=Rf81XcAXA-vwNvrhskf5sLExbR--Wkr5fiUcMYGAKzs,1001
-libfuturize/fixes/fix_input.py,sha256=bhaPNtMrZNbjWIDQCR7Iue5BxBj4rf0RJQ9_jiwvb-s,687
-libfuturize/fixes/fix_metaclass.py,sha256=GLB76wbuyUVciDgW9bgNNOBEnLeS_AR-fKABcPBZk6M,9568
-libfuturize/fixes/fix_next_call.py,sha256=01STG86Av9o5QcpQDJ6UbPhvxt9kKrkatiPeddXRgvA,3158
-libfuturize/fixes/fix_object.py,sha256=qalFIjn0VTWXG5sGOOoCvO65omjX5_9d40SUpwUjBdw,407
-libfuturize/fixes/fix_oldstr_wrap.py,sha256=UCR6Q2l-pVqJSrRTnQAWMlaqBoX7oX1VpG_w6Q0XcyY,1214
-libfuturize/fixes/fix_order___future__imports.py,sha256=ACUCw5NEGWvj6XA9rNj8BYha3ktxLvkM5Ssh5cyV644,829
-libfuturize/fixes/fix_print.py,sha256=92s1w2t9SynA3Y1_85-lexSBbgEWJM6lBrhCxVacfDc,3384
-libfuturize/fixes/fix_print_with_import.py,sha256=hVWn70Q1DPMUiHMyEqgUx-6sM1AylLj78v9pMc4LFw8,735
-libfuturize/fixes/fix_raise.py,sha256=mEXpM9sS6tenMmxayfqM-Kp9gUvaztTY61vFaqyMUuo,3884
-libfuturize/fixes/fix_remove_old__future__imports.py,sha256=j4EC1KEVgXhuQAqhYHnAruUjW6uczPjV_fTCSOLMuAw,851
-libfuturize/fixes/fix_unicode_keep_u.py,sha256=M8fcFxHeFnWVOKoQRpkMsnpd9qmUFubI2oFhO4ZPk7A,779
-libfuturize/fixes/fix_unicode_literals_import.py,sha256=wq-hb-9Yx3Az4ol-ylXZJPEDZ81EaPZeIy5VvpA0CEY,367
-libfuturize/fixes/fix_xrange_with_import.py,sha256=f074qStjMz3OtLjt1bKKZSxQnRbbb7HzEbqHt9wgqdw,479
-libfuturize/main.py,sha256=feICmcv0dzWhutvwz0unnIVxusbSlQZFDaxObkHebs8,13733
-libpasteurize/__init__.py,sha256=CZA_KgvTQOPAY1_MrlJeQ6eMh2Eei4_KIv4JuyAkpfw,31
-libpasteurize/fixes/__init__.py,sha256=ccdv-2MGjQMbq8XuEZBndHmbzGRrZnabksjXZLUv044,3719
-libpasteurize/fixes/feature_base.py,sha256=v7yLjBDBUPeNUc-YHGGlIsJDOQzFAM4Vo0RN5F1JHVU,1723
-libpasteurize/fixes/fix_add_all__future__imports.py,sha256=mHet1LgbHn9GfgCYGNZXKo-rseDWreAvUcAjZwdgeTE,676
-libpasteurize/fixes/fix_add_all_future_builtins.py,sha256=scfkY-Sz5j0yDtLYls2ENOcqEMPVxeDm9gFYYPINPB8,1269
-libpasteurize/fixes/fix_add_future_standard_library_import.py,sha256=thTRbkBzy_SJjZ0bJteTp0sBTx8Wr69xFakH4styf7Y,663
-libpasteurize/fixes/fix_annotations.py,sha256=VT_AorKY9AYWYZUZ17_CeUrJlEA7VGkwVLDQlwD1Bxo,1581
-libpasteurize/fixes/fix_division.py,sha256=_TD_c5KniAYqEm11O7NJF0v2WEhYSNkRGcKG_94ZOas,904
-libpasteurize/fixes/fix_features.py,sha256=NZn0n34_MYZpLNwyP1Tf51hOiN58Rg7A8tA9pK1S8-c,2675
-libpasteurize/fixes/fix_fullargspec.py,sha256=VlZuIU6QNrClmRuvC4mtLICL3yMCi-RcGCnS9fD4b-Q,438
-libpasteurize/fixes/fix_future_builtins.py,sha256=SlCK9I9u05m19Lr1wxlJxF8toZ5yu0yXBeDLxUN9_fw,1450
-libpasteurize/fixes/fix_getcwd.py,sha256=uebvTvFboLqsROFCwdnzoP6ThziM0skz9TDXHoJcFsQ,873
-libpasteurize/fixes/fix_imports.py,sha256=U4lIs_5Xp1qqM8mN72ieDkkIdiyALZFyCZsRC8ZmXlM,4944
-libpasteurize/fixes/fix_imports2.py,sha256=bs2V5Yv0v_8xLx-lNj9kNEAK2dLYXUXkZ2hxECg01CU,8580
-libpasteurize/fixes/fix_kwargs.py,sha256=NB_Ap8YJk-9ncoJRbOiPY_VMIigFgVB8m8AuY29DDhE,5991
-libpasteurize/fixes/fix_memoryview.py,sha256=Fwayx_ezpr22tbJ0-QrKdJ-FZTpU-m7y78l1h_N4xxc,551
-libpasteurize/fixes/fix_metaclass.py,sha256=IcE2KjaDG8jUR3FYXECzOC_cr2pr5r95W1NTbMrK8Wc,3260
-libpasteurize/fixes/fix_newstyle.py,sha256=78sazKOHm9DUoMyW4VdvQpMXZhicbXzorVPRhBpSUrM,888
-libpasteurize/fixes/fix_next.py,sha256=VHqcyORRNVqKJ51jJ1OkhwxHuXRgp8qaldyqcMvA4J0,1233
-libpasteurize/fixes/fix_printfunction.py,sha256=NDIfqVmUJBG3H9E6nrnN0cWZK8ch9pL4F-nMexdsa38,401
-libpasteurize/fixes/fix_raise.py,sha256=zQ_AcMsGmCbtKMgrxZGcHLYNscw6tqXFvHQxgqtNbU8,1099
-libpasteurize/fixes/fix_raise_.py,sha256=9STp633frUfYASjYzqhwxx_MXePNmMhfJClowRj8FLY,1225
-libpasteurize/fixes/fix_throw.py,sha256=_ZREVre-WttUvk4sWjrqUNqm9Q1uFaATECN0_-PXKbk,835
-libpasteurize/fixes/fix_unpacking.py,sha256=eMqRe44Nfq8lo0YFL9oKW75dGARmBSmklj4BCS_q1Lo,5946
-libpasteurize/main.py,sha256=dVHYTQQeJonuOFDNrenJZl-rKHgOQKRMPP1OqnJogWQ,8186
-past/__init__.py,sha256=wIiXaAvXl3svDi-fzuy6HDD0VsuCVr4cnqnCr8XINGI,2918
-past/builtins/__init__.py,sha256=7j_4OsUlN6q2eKr14do7mRQ1GwXRoXAMUR0A1fJpAls,1805
-past/builtins/misc.py,sha256=nw62HVSxuAgT-Q2lD3lmgRB9zmFXopS14dZHEv5xpDQ,2627
-past/builtins/noniterators.py,sha256=LtdELnd7KyYdXg7GkW25cgkEPUC0ggZ5AYMtDe9N95I,9370
-past/translation/__init__.py,sha256=j2e6mLeK74KEICqH6P_-tpKqSNZoMwip2toThhSmKpU,17646
-past/types/__init__.py,sha256=RyJlgqg9uJ8oF-kJT9QlfhfdmhiMh3fShmtvd2CQycY,879
-past/types/basestring.py,sha256=qrImcr24wvdDCMvF9x0Tyx8S1lCt6GIwRvzuAmvg_Tg,728
-past/types/olddict.py,sha256=0YtffZ55VY6AyQ_rwu4DZ4vcRsp6dz-dQzczeyN8hLk,2721
-past/types/oldstr.py,sha256=J2sJPC5jWEdpqXPcFwJFNDKn51TKhi86PsLFmJtQr-M,4332
-past/utils/__init__.py,sha256=e8l1sOfdiDJ3dkckBWLNWvC1ahC5BX5haHC2TGdNgA8,2633
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/WHEEL
deleted file mode 100644
index becc9a66..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.37.1)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/entry_points.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/entry_points.txt
deleted file mode 100644
index 45d1a880..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/entry_points.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-[console_scripts]
-futurize = libfuturize.main:main
-pasteurize = libpasteurize.main:main
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/top_level.txt
deleted file mode 100644
index 58f5843c..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future-0.18.2.dist-info/top_level.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-future
-libfuturize
-libpasteurize
-past
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/__init__.py
old mode 100644
new mode 100755
index ad419d67..f7a6fbeb
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/__init__.py
@@ -68,7 +68,7 @@
Credits
-------
-:Author: Ed Schofield, Jordan M. Adler, et al
+:Author: Ed Schofield
:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte
Ltd, Singapore. http://pythoncharmers.com
:Others: See docs/credits.rst or http://python-future.org/credits.html
@@ -76,7 +76,7 @@
Licensing
---------
-Copyright 2013-2019 Python Charmers Pty Ltd, Australia.
+Copyright 2013-2018 Python Charmers Pty Ltd, Australia.
The software is distributed under an MIT licence. See LICENSE.txt.
"""
@@ -84,10 +84,10 @@
__title__ = 'future'
__author__ = 'Ed Schofield'
__license__ = 'MIT'
-__copyright__ = 'Copyright 2013-2019 Python Charmers Pty Ltd'
+__copyright__ = 'Copyright 2013-2018 Python Charmers Pty Ltd'
__ver_major__ = 0
-__ver_minor__ = 18
-__ver_patch__ = 2
+__ver_minor__ = 17
+__ver_patch__ = 1
__ver_sub__ = ''
__version__ = "%d.%d.%d%s" % (__ver_major__, __ver_minor__,
__ver_patch__, __ver_sub__)
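Taken together with the deleted 0.18.2 dist-info above, this hunk pins the vendored future copy back to 0.17.1; the reverted fields evaluate as follows (a quick check, not part of the patch):

    # with __ver_major__=0, __ver_minor__=17, __ver_patch__=1, __ver_sub__=''
    print("%d.%d.%d%s" % (0, 17, 1, ''))  # -> 0.17.1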
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/__init__.py
old mode 100644
new mode 100755
index c71e0653..68291141
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/__init__.py
@@ -10,7 +10,7 @@
from future.standard_library import import_top_level_modules
-if sys.version_info[0] >= 3:
+if sys.version_info[0] == 3:
import_top_level_modules()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/_markupbase.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/_markupbase.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/datetime.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/datetime.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/_encoded_words.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/_encoded_words.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/_header_value_parser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/_header_value_parser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/_parseaddr.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/_parseaddr.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/_policybase.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/_policybase.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/base64mime.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/base64mime.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/charset.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/charset.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/encoders.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/encoders.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/errors.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/errors.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/feedparser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/feedparser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/generator.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/generator.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/header.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/header.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/headerregistry.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/headerregistry.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/iterators.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/iterators.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/message.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/message.py
old mode 100644
new mode 100755
index d8d9615d..99715fcc
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/message.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/message.py
@@ -800,7 +800,7 @@ def set_boundary(self, boundary):
# There was no Content-Type header, and we don't know what type
# to set it to, so raise an exception.
raise errors.HeaderParseError('No Content-Type header found')
- newparams = list()
+ newparams = []
foundp = False
for pk, pv in params:
if pk.lower() == 'boundary':
@@ -814,10 +814,10 @@ def set_boundary(self, boundary):
# instead???
newparams.append(('boundary', '"%s"' % boundary))
# Replace the existing Content-Type header with the new value
- newheaders = list()
+ newheaders = []
for h, v in self._headers:
if h.lower() == 'content-type':
- parts = list()
+ parts = []
for k, v in newparams:
if v == '':
parts.append(k)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/application.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/application.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/audio.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/audio.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/base.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/base.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/image.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/image.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/message.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/message.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/multipart.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/multipart.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/nonmultipart.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/nonmultipart.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/text.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/mime/text.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/parser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/parser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/policy.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/policy.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/quoprimime.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/quoprimime.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/utils.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/email/utils.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/html/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/html/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/html/entities.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/html/entities.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/html/parser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/html/parser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/client.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/client.py
old mode 100644
new mode 100755
index e663d125..5dd983d8
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/client.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/client.py
@@ -79,15 +79,11 @@
import io
import os
import socket
+import collections
from future.backports.urllib.parse import urlsplit
import warnings
from array import array
-if PY2:
- from collections import Iterable
-else:
- from collections.abc import Iterable
-
__all__ = ["HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
@@ -700,19 +696,9 @@ def _safe_readinto(self, b):
while total_bytes < len(b):
if MAXAMOUNT < len(mvb):
temp_mvb = mvb[0:MAXAMOUNT]
- if PY2:
- data = self.fp.read(len(temp_mvb))
- n = len(data)
- temp_mvb[:n] = data
- else:
- n = self.fp.readinto(temp_mvb)
+ n = self.fp.readinto(temp_mvb)
else:
- if PY2:
- data = self.fp.read(len(mvb))
- n = len(data)
- mvb[:n] = data
- else:
- n = self.fp.readinto(mvb)
+ n = self.fp.readinto(mvb)
if not n:
raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b))
mvb = mvb[n:]
@@ -906,7 +892,7 @@ def send(self, data):
try:
self.sock.sendall(data)
except TypeError:
- if isinstance(data, Iterable):
+ if isinstance(data, collections.Iterable):
for d in data:
self.sock.sendall(d)
else:
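This hunk reverts to the bare `collections.Iterable` lookup, which is deprecated since Python 3.3 and removed in 3.10, where it raises AttributeError; the guard being deleted is the usual compatibility pattern, sketched below (the same un-guarded imports recur in the misc.py and urllib/request.py hunks further down):

    try:
        from collections.abc import Iterable  # Python 3.3+
    except ImportError:
        from collections import Iterable       # Python 2

    print(isinstance([1, 2, 3], Iterable))  # -> True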
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/cookiejar.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/cookiejar.py
old mode 100644
new mode 100755
index af3ef415..cad72f9b
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/cookiejar.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/cookiejar.py
@@ -33,7 +33,7 @@
from __future__ import division
from __future__ import absolute_import
from future.builtins import filter, int, map, open, str
-from future.utils import as_native_str, PY2
+from future.utils import as_native_str
__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar']
@@ -41,8 +41,7 @@
import copy
import datetime
import re
-if PY2:
- re.ASCII = 0
+re.ASCII = 0
import time
from future.backports.urllib.parse import urlparse, urlsplit, quote
from future.backports.http.client import HTTP_PORT
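An aside on why the PY2 guard removed above mattered: executed on Python 3, the unconditional assignment clobbers the real re.ASCII flag with 0, silently disabling ASCII-only matching for any later pattern that passes re.ASCII. A quick demonstration (illustrative only; the same assignment is also reverted in cookies.py just below):

    import re

    print(int(re.ASCII))  # -> 256 on a stock interpreter
    re.ASCII = 0          # what the reverted module does at import time on Py3
    # the flag is now a no-op, so \w falls back to full Unicode matching
    print(re.match(r"\w+", "héllo", re.ASCII).group())  # -> héllo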
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/cookies.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/cookies.py
old mode 100644
new mode 100755
index 8bb61e22..ae32ed7e
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/cookies.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/cookies.py
@@ -138,8 +138,7 @@
# Import our required modules
#
import re
-if PY2:
- re.ASCII = 0 # for py2 compatibility
+re.ASCII = 0 # for py2 compatibility
import string
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/server.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/http/server.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/misc.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/misc.py
old mode 100644
new mode 100755
index 098a0667..ef752078
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/misc.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/misc.py
@@ -16,6 +16,7 @@
import subprocess
from math import ceil as oldceil
+from collections import Mapping, MutableMapping
from operator import itemgetter as _itemgetter, eq as _eq
import sys
@@ -24,12 +25,7 @@
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from socket import getaddrinfo, SOCK_STREAM, error, socket
-from future.utils import iteritems, itervalues, PY2, PY26, PY3
-
-if PY2:
- from collections import Mapping, MutableMapping
-else:
- from collections.abc import Mapping, MutableMapping
+from future.utils import iteritems, itervalues, PY26, PY3
def ceil(x):
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/socket.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/socket.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/socketserver.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/socketserver.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/total_ordering.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/total_ordering.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/error.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/error.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/parse.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/parse.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/request.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/request.py
old mode 100644
new mode 100755
index baee5401..b1545ca0
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/request.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/request.py
@@ -109,17 +109,11 @@
import socket
import sys
import time
+import collections
import tempfile
import contextlib
import warnings
-from future.utils import PY2
-
-if PY2:
- from collections import Iterable
-else:
- from collections.abc import Iterable
-
# check for SSL
try:
import ssl
@@ -1227,7 +1221,7 @@ def do_request_(self, request):
mv = memoryview(data)
size = len(mv) * mv.itemsize
except TypeError:
- if isinstance(data, Iterable):
+ if isinstance(data, collections.Iterable):
raise ValueError("Content-Length should be specified "
"for iterable data of type %r %r" % (type(data),
data))
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/response.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/response.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/robotparser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/urllib/robotparser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/xmlrpc/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/xmlrpc/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/xmlrpc/client.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/xmlrpc/client.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/xmlrpc/server.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/backports/xmlrpc/server.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/__init__.py
old mode 100644
new mode 100755
index 8bc1649d..216465a1
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/__init__.py
@@ -11,7 +11,7 @@
# The isinstance import is no longer needed. We provide it only for
# backward-compatibility with future v0.8.2. It will be removed in future v1.0.
from future.builtins.misc import (ascii, chr, hex, input, isinstance, next,
- oct, open, pow, round, super, max, min)
+ oct, open, pow, round, super)
from future.utils import PY3
if PY3:
@@ -43,7 +43,7 @@
__all__ = ['filter', 'map', 'zip',
'ascii', 'chr', 'hex', 'input', 'next', 'oct', 'open', 'pow',
'round', 'super',
- 'bytes', 'dict', 'int', 'list', 'object', 'range', 'str', 'max', 'min'
+ 'bytes', 'dict', 'int', 'list', 'object', 'range', 'str',
]
else:
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/disabled.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/disabled.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/iterators.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/iterators.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/misc.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/misc.py
old mode 100644
new mode 100755
index f86ce5f3..90dc384a
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/misc.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/misc.py
@@ -13,8 +13,6 @@
- ``open`` (equivalent to io.open on Py2)
- ``super`` (backport of Py3's magic zero-argument super() function
- ``round`` (new "Banker's Rounding" behaviour from Py3)
-- ``max`` (new default option from Py3.4)
-- ``min`` (new default option from Py3.4)
``isinstance`` is also currently exported for backwards compatibility
with v0.8.2, although this has been deprecated since v0.9.
@@ -61,8 +59,6 @@
from future.builtins.newnext import newnext as next
from future.builtins.newround import newround as round
from future.builtins.newsuper import newsuper as super
- from future.builtins.new_min_max import newmax as max
- from future.builtins.new_min_max import newmin as min
from future.types.newint import newint
_SENTINEL = object()
@@ -93,12 +89,11 @@ def pow(x, y, z=_SENTINEL):
else:
return _builtin_pow(x+0j, y, z)
-
# ``future`` doesn't support Py3.0/3.1. If we ever did, we'd add this:
# callable = __builtin__.callable
__all__ = ['ascii', 'chr', 'hex', 'input', 'isinstance', 'next', 'oct',
- 'open', 'pow', 'round', 'super', 'max', 'min']
+ 'open', 'pow', 'round', 'super']
else:
import builtins
@@ -114,14 +109,8 @@ def pow(x, y, z=_SENTINEL):
pow = builtins.pow
round = builtins.round
super = builtins.super
- if utils.PY34_PLUS:
- max = builtins.max
- min = builtins.min
- __all__ = []
- else:
- from future.builtins.new_min_max import newmax as max
- from future.builtins.new_min_max import newmin as min
- __all__ = ['min', 'max']
+
+ __all__ = []
# The callable() function was removed from Py3.0 and 3.1 and
# reintroduced into Py3.2+. ``future`` doesn't support Py3.0/3.1. If we ever
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/new_min_max.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/new_min_max.py
deleted file mode 100644
index 6f0c2a86..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/new_min_max.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import itertools
-
-from future import utils
-if utils.PY2:
- from __builtin__ import max as _builtin_max, min as _builtin_min
-else:
- from builtins import max as _builtin_max, min as _builtin_min
-
-_SENTINEL = object()
-
-
-def newmin(*args, **kwargs):
- return new_min_max(_builtin_min, *args, **kwargs)
-
-
-def newmax(*args, **kwargs):
- return new_min_max(_builtin_max, *args, **kwargs)
-
-
-def new_min_max(_builtin_func, *args, **kwargs):
- """
- To support the argument "default" introduced in python 3.4 for min and max
- :param _builtin_func: builtin min or builtin max
- :param args:
- :param kwargs:
- :return: returns the min or max based on the arguments passed
- """
-
- for key, _ in kwargs.items():
- if key not in set(['key', 'default']):
- raise TypeError('Illegal argument %s', key)
-
- if len(args) == 0:
- raise TypeError
-
- if len(args) != 1 and kwargs.get('default', _SENTINEL) is not _SENTINEL:
- raise TypeError
-
- if len(args) == 1:
- iterator = iter(args[0])
- try:
- first = next(iterator)
- except StopIteration:
- if kwargs.get('default', _SENTINEL) is not _SENTINEL:
- return kwargs.get('default')
- else:
- raise ValueError('{}() arg is an empty sequence'.format(_builtin_func.__name__))
- else:
- iterator = itertools.chain([first], iterator)
- if kwargs.get('key') is not None:
- return _builtin_func(iterator, key=kwargs.get('key'))
- else:
- return _builtin_func(iterator)
-
- if len(args) > 1:
- if kwargs.get('key') is not None:
- return _builtin_func(args, key=kwargs.get('key'))
- else:
- return _builtin_func(args)
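
The deleted new_min_max.py above backports the default= keyword that the built-in min()/max() gained in Python 3.4, which is why the file is redundant on Py3. A minimal sketch of the same semantics, under a hypothetical helper name:

import itertools

def max_with_default(iterable, default, key=None):
    it = iter(iterable)
    try:
        first = next(it)
    except StopIteration:
        return default                    # empty input -> default, no ValueError
    rest = itertools.chain([first], it)   # put the first element back
    return max(rest, key=key) if key is not None else max(rest)

assert max_with_default([], default=0) == 0
assert max_with_default([3, 1, 2], default=0) == 3
assert max([], default=0) == 0            # builtin equivalent on Python 3.4+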
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/newnext.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/newnext.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/newround.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/newround.py
old mode 100644
new mode 100755
index 394a2c63..3943ebb6
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/newround.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/newround.py
@@ -38,14 +38,11 @@ def newround(number, ndigits=None):
if 'numpy' in repr(type(number)):
number = float(number)
- if isinstance(number, Decimal):
- d = number
+ if not PY26:
+ d = Decimal.from_float(number).quantize(exponent,
+ rounding=ROUND_HALF_EVEN)
else:
- if not PY26:
- d = Decimal.from_float(number).quantize(exponent,
- rounding=ROUND_HALF_EVEN)
- else:
- d = from_float_26(number).quantize(exponent, rounding=ROUND_HALF_EVEN)
+ d = from_float_26(number).quantize(exponent, rounding=ROUND_HALF_EVEN)
if return_int:
return int(d)
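
The newround.py hunk above drops the Decimal fast path but keeps the core mechanism: Python 3's round() uses banker's rounding, reproduced via Decimal.quantize() with ROUND_HALF_EVEN. A small sketch of that mechanism (illustrative helper, not the package's newround):

from decimal import Decimal, ROUND_HALF_EVEN

def bankers_round(number, ndigits=0):
    exponent = Decimal("10") ** -ndigits
    d = Decimal.from_float(number).quantize(exponent, rounding=ROUND_HALF_EVEN)
    return float(d)

assert bankers_round(0.5) == 0.0   # ties round to the even neighbour
assert bankers_round(1.5) == 2.0
assert bankers_round(2.5) == 2.0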
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/newsuper.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/builtins/newsuper.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/__init__.py
old mode 100644
new mode 100755
index 0cd60d3d..040fdcf0
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/__init__.py
@@ -4,5 +4,5 @@
__future_module__ = True
from future.standard_library import import_top_level_modules
-if sys.version_info[0] >= 3:
+if sys.version_info[0] == 3:
import_top_level_modules()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/_dummy_thread.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/_dummy_thread.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/_markupbase.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/_markupbase.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/_thread.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/_thread.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/builtins.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/builtins.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/collections.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/collections.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/configparser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/configparser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/copyreg.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/copyreg.py
old mode 100644
new mode 100755
index 9d08cdc5..21c7a42f
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/copyreg.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/copyreg.py
@@ -2,11 +2,7 @@
from future.utils import PY3
if PY3:
- import copyreg, sys
- # A "*" import uses Python 3's copyreg.__all__ which does not include
- # all public names in the API surface for copyreg, this avoids that
- # problem by just making our module _be_ a reference to the actual module.
- sys.modules['future.moves.copyreg'] = copyreg
+ from copyreg import *
else:
__future_module__ = True
from copy_reg import *
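
The copyreg hunk above replaces a module-aliasing trick with a plain star import. The removed comment explains why the trick existed: `from copyreg import *` only re-exports names listed in copyreg.__all__, while installing the real module object into sys.modules exposes every public name. A sketch of the aliasing idea under a hypothetical top-level name:

import sys
import copyreg

sys.modules['copyreg_alias'] = copyreg   # hypothetical alias name

import copyreg_alias                     # resolved straight from sys.modules
assert copyreg_alias is copyreg
# dispatch_table is public but absent from copyreg.__all__, so a star
# import would miss it; the alias keeps it visible.
assert hasattr(copyreg_alias, 'dispatch_table')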
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/dbm/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/dbm/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/dbm/dumb.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/dbm/dumb.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/dbm/gnu.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/dbm/gnu.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/dbm/ndbm.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/dbm/ndbm.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/html/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/html/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/html/entities.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/html/entities.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/html/parser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/html/parser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/http/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/http/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/http/client.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/http/client.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/http/cookiejar.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/http/cookiejar.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/http/cookies.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/http/cookies.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/http/server.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/http/server.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/itertools.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/itertools.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/pickle.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/pickle.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/queue.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/queue.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/reprlib.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/reprlib.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/socketserver.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/socketserver.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/subprocess.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/subprocess.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/sys.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/sys.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/colorchooser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/colorchooser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/commondialog.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/commondialog.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/constants.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/constants.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/dialog.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/dialog.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/dnd.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/dnd.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/filedialog.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/filedialog.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/font.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/font.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/messagebox.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/messagebox.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/scrolledtext.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/scrolledtext.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/simpledialog.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/simpledialog.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/tix.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/tix.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/ttk.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/tkinter/ttk.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/error.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/error.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/parse.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/parse.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/request.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/request.py
old mode 100644
new mode 100755
index 972aa4ab..60e440a7
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/request.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/request.py
@@ -11,8 +11,19 @@
proxy_bypass,
quote,
request_host,
+ splitattr,
+ splithost,
+ splitpasswd,
+ splitport,
+ splitquery,
+ splittag,
+ splittype,
+ splituser,
+ splitvalue,
thishost,
+ to_bytes,
unquote,
+ unwrap,
url2pathname,
urlcleanup,
urljoin,
@@ -21,18 +32,6 @@
urlretrieve,
urlsplit,
urlunparse)
-
- from urllib.parse import (splitattr,
- splithost,
- splitpasswd,
- splitport,
- splitquery,
- splittag,
- splittype,
- splituser,
- splitvalue,
- to_bytes,
- unwrap)
else:
__future_module__ = True
with suspend_hooks():
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/response.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/response.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/robotparser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/urllib/robotparser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/winreg.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/winreg.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/xmlrpc/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/xmlrpc/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/xmlrpc/client.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/xmlrpc/client.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/xmlrpc/server.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/moves/xmlrpc/server.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/standard_library/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/standard_library/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/tests/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/tests/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/tests/base.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/tests/base.py
old mode 100644
new mode 100755
index 4ef437ba..9f4607b6
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/tests/base.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/tests/base.py
@@ -272,11 +272,7 @@ def convert_check(self, before, expected, stages=(1, 2), all_imports=False,
else:
headers = ''
- reformatted = reformat_code(expected)
- if headers in reformatted:
- headers = ''
-
- self.compare(output, headers + reformatted,
+ self.compare(output, headers + reformat_code(expected),
ignore_imports=ignore_imports)
def unchanged(self, code, **kwargs):
@@ -342,10 +338,6 @@ def _futurize_test_script(self, filename='mytestscript.py', stages=(1, 2),
'----\n%s\n----' % f.read(),
)
ErrorClass = (FuturizeError if 'futurize' in script else PasteurizeError)
-
- if not hasattr(e, 'output'):
- # The attribute CalledProcessError.output doesn't exist on Py2.6
- e.output = None
raise ErrorClass(msg, e.returncode, e.cmd, output=e.output)
return output
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newbytes.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newbytes.py
old mode 100644
new mode 100755
index c9d584a7..2a337c86
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newbytes.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newbytes.py
@@ -5,19 +5,15 @@
different beast to the Python 3 bytes object.
"""
+from collections import Iterable
from numbers import Integral
import string
import copy
-from future.utils import istext, isbytes, PY2, PY3, with_metaclass
+from future.utils import istext, isbytes, PY3, with_metaclass
from future.types import no, issubset
from future.types.newobject import newobject
-if PY2:
- from collections import Iterable
-else:
- from collections.abc import Iterable
-
_builtin_bytes = bytes
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newdict.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newdict.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newint.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newint.py
old mode 100644
new mode 100755
index 748dba9d..705b8fa9
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newint.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newint.py
@@ -8,6 +8,7 @@
from __future__ import division
import struct
+import collections
from future.types.newbytes import newbytes
from future.types.newobject import newobject
@@ -16,9 +17,6 @@
if PY3:
long = int
- from collections.abc import Iterable
-else:
- from collections import Iterable
class BaseNewInt(type):
@@ -358,7 +356,7 @@ def from_bytes(cls, mybytes, byteorder='big', signed=False):
raise TypeError("cannot convert unicode objects to bytes")
# mybytes can also be passed as a sequence of integers on Py3.
# Test for this:
- elif isinstance(mybytes, Iterable):
+ elif isinstance(mybytes, collections.Iterable):
mybytes = newbytes(mybytes)
b = mybytes if byteorder == 'big' else mybytes[::-1]
if len(b) == 0:
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newlist.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newlist.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newmemoryview.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newmemoryview.py
old mode 100644
new mode 100755
index 09f804dc..72c6990a
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newmemoryview.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newmemoryview.py
@@ -1,16 +1,14 @@
"""
A pretty lame implementation of a memoryview object for Python 2.6.
"""
+
+from collections import Iterable
from numbers import Integral
import string
-from future.utils import istext, isbytes, PY2, with_metaclass
+from future.utils import istext, isbytes, PY3, with_metaclass
from future.types import no, issubset
-if PY2:
- from collections import Iterable
-else:
- from collections.abc import Iterable
# class BaseNewBytes(type):
# def __instancecheck__(cls, instance):
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newobject.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newobject.py
old mode 100644
new mode 100755
index 31b84fc1..776d4766
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newobject.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newobject.py
@@ -112,6 +112,5 @@ def __native__(self):
"""
return object(self)
- __slots__ = []
__all__ = ['newobject']
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newopen.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newopen.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newrange.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newrange.py
old mode 100644
new mode 100755
index eda01a5a..9173b050
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newrange.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newrange.py
@@ -19,12 +19,7 @@
"""
from __future__ import absolute_import
-from future.utils import PY2
-
-if PY2:
- from collections import Sequence, Iterator
-else:
- from collections.abc import Sequence, Iterator
+from collections import Sequence, Iterator
from itertools import islice
from future.backports.misc import count # with step parameter on Py2.6
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newstr.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newstr.py
old mode 100644
new mode 100755
index 8ca191f9..e6272fb9
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newstr.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/types/newstr.py
@@ -40,6 +40,7 @@
"""
+from collections import Iterable
from numbers import Number
from future.utils import PY3, istext, with_metaclass, isnewbytes
@@ -50,9 +51,6 @@
if PY3:
# We'll probably never use newstr on Py3 anyway...
unicode = str
- from collections.abc import Iterable
-else:
- from collections import Iterable
class BaseNewStr(type):
@@ -107,7 +105,6 @@ def __repr__(self):
"""
Without the u prefix
"""
-
value = super(newstr, self).__repr__()
# assert value[0] == u'u'
return value[1:]
@@ -293,14 +290,7 @@ def __eq__(self, other):
isinstance(other, bytes) and not isnewbytes(other)):
return super(newstr, self).__eq__(other)
else:
- return NotImplemented
-
- def __hash__(self):
- if (isinstance(self, unicode) or
- isinstance(self, bytes) and not isnewbytes(self)):
- return super(newstr, self).__hash__()
- else:
- raise NotImplementedError()
+ return False
def __ne__(self, other):
if (isinstance(other, unicode) or
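
The newstr hunk above changes __eq__ to return False and deletes __hash__; note that returning NotImplemented (the removed behaviour) lets Python fall back to the other operand's reflected comparison, and that a Py3 class defining __eq__ without __hash__ becomes unhashable. A toy sketch of why NotImplemented matters (illustrative classes, unrelated to newstr):

class Celsius:
    def __init__(self, t):
        self.t = t
    def __eq__(self, other):
        if isinstance(other, Celsius):
            return self.t == other.t
        return NotImplemented            # let `other` have a say

class Fahrenheit:
    def __init__(self, t):
        self.t = t
    def __eq__(self, other):
        if isinstance(other, Celsius):
            return self.t == other.t * 9 / 5 + 32
        return NotImplemented

# Celsius.__eq__ returns NotImplemented, so Python retries with
# Fahrenheit.__eq__; returning False here would break this symmetry.
assert Celsius(100) == Fahrenheit(212)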
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/utils/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/utils/__init__.py
old mode 100644
new mode 100755
index 46bd96de..906f1e46
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/utils/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/utils/__init__.py
@@ -18,10 +18,8 @@
* types:
* text_type: unicode in Python 2, str in Python 3
- * string_types: basestring in Python 2, str in Python 3
* binary_type: str in Python 2, bytes in Python 3
- * integer_types: (int, long) in Python 2, int in Python 3
- * class_types: (type, types.ClassType) in Python 2, type in Python 3
+ * string_types: basestring in Python 2, str in Python 3
* bchr(c):
Take an integer and make a 1-character byte string
@@ -57,8 +55,7 @@
import inspect
-PY3 = sys.version_info[0] >= 3
-PY34_PLUS = sys.version_info[0:2] >= (3, 4)
+PY3 = sys.version_info[0] == 3
PY35_PLUS = sys.version_info[0:2] >= (3, 5)
PY36_PLUS = sys.version_info[0:2] >= (3, 6)
PY2 = sys.version_info[0] == 2
@@ -408,34 +405,12 @@ def raise_(tp, value=None, tb=None):
allows re-raising exceptions with the cls value and traceback on
Python 2 and 3.
"""
- if isinstance(tp, BaseException):
- # If the first object is an instance, the type of the exception
- # is the class of the instance, the instance itself is the value,
- # and the second object must be None.
- if value is not None:
- raise TypeError("instance exception may not have a separate value")
- exc = tp
- elif isinstance(tp, type) and not issubclass(tp, BaseException):
- # If the first object is a class, it becomes the type of the
- # exception.
- raise TypeError("class must derive from BaseException, not %s" % tp.__name__)
+ if value is not None and isinstance(tp, Exception):
+ raise TypeError("instance exception may not have a separate value")
+ if value is not None:
+ exc = tp(value)
else:
- # The second object is used to determine the exception value: If it
- # is an instance of the class, the instance becomes the exception
- # value. If the second object is a tuple, it is used as the argument
- # list for the class constructor; if it is None, an empty argument
- # list is used, and any other object is treated as a single argument
- # to the constructor. The instance so created by calling the
- # constructor is used as the exception value.
- if isinstance(value, tp):
- exc = value
- elif isinstance(value, tuple):
- exc = tp(*value)
- elif value is None:
- exc = tp()
- else:
- exc = tp(value)
-
+ exc = tp
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc
@@ -468,14 +443,12 @@ def raise_from(exc, cause):
e.__suppress_context__ = False
if isinstance(cause, type) and issubclass(cause, Exception):
e.__cause__ = cause()
- e.__cause__.__traceback__ = sys.exc_info()[2]
e.__suppress_context__ = True
elif cause is None:
e.__cause__ = None
e.__suppress_context__ = True
elif isinstance(cause, BaseException):
e.__cause__ = cause
- object.__setattr__(e.__cause__, '__traceback__', sys.exc_info()[2])
e.__suppress_context__ = True
else:
raise TypeError("exception causes must derive from BaseException")
@@ -579,14 +552,15 @@ def isbytes(obj):
def isnewbytes(obj):
"""
- Equivalent to the result of ``type(obj) == type(newbytes)``
- in other words, it is REALLY a newbytes instance, not a Py2 native str
+ Equivalent to the result of ``isinstance(obj, newbytes)`` were
+ ``__instancecheck__`` not overridden on the newbytes subclass. In
+ other words, it is REALLY a newbytes instance, not a Py2 native str
object?
-
- Note that this does not cover subclasses of newbytes, and it is not
- equivalent to ininstance(obj, newbytes)
"""
- return type(obj).__name__ == 'newbytes'
+ # TODO: generalize this so that it works with subclasses of newbytes
+ # Import is here to avoid circular imports:
+ from future.types.newbytes import newbytes
+ return type(obj) == newbytes
def isint(obj):
@@ -752,16 +726,16 @@ def ensure_new_type(obj):
__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
- 'as_native_str', 'binary_type', 'bind_method', 'bord', 'bstr',
- 'bytes_to_native_str', 'class_types', 'encode_filename',
- 'ensure_new_type', 'exec_', 'get_next', 'getexception',
- 'implements_iterator', 'integer_types', 'is_new_style', 'isbytes',
- 'isidentifier', 'isint', 'isnewbytes', 'istext', 'iteritems',
- 'iterkeys', 'itervalues', 'lfilter', 'listitems', 'listvalues',
- 'lmap', 'lrange', 'lzip', 'native', 'native_bytes', 'native_str',
+ 'as_native_str', 'bind_method', 'bord', 'bstr',
+ 'bytes_to_native_str', 'encode_filename', 'ensure_new_type',
+ 'exec_', 'get_next', 'getexception', 'implements_iterator',
+ 'is_new_style', 'isbytes', 'isidentifier', 'isint',
+ 'isnewbytes', 'istext', 'iteritems', 'iterkeys', 'itervalues',
+ 'lfilter', 'listitems', 'listvalues', 'lmap', 'lrange',
+ 'lzip', 'native', 'native_bytes', 'native_str',
'native_str_to_bytes', 'old_div',
'python_2_unicode_compatible', 'raise_',
- 'raise_with_traceback', 'reraise', 'string_types',
- 'text_to_native_str', 'text_type', 'tobytes', 'viewitems',
- 'viewkeys', 'viewvalues', 'with_metaclass'
- ]
+ 'raise_with_traceback', 'reraise', 'text_to_native_str',
+ 'tobytes', 'viewitems', 'viewkeys', 'viewvalues',
+ 'with_metaclass'
+ ]
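
The raise_() rewrite above simplifies the type/value/traceback handling down to the core Py3 re-raise idiom: build the exception, then attach an existing traceback via with_traceback(). A minimal sketch of that idiom (hypothetical helper, deliberately narrower than the library's raise_):

import sys

def reraise_with_tb(exc_type, value=None):
    tb = sys.exc_info()[2]               # traceback of the active exception
    exc = exc_type(value) if value is not None else exc_type()
    raise exc.with_traceback(tb)         # original failure site preserved

try:
    try:
        1 / 0
    except ZeroDivisionError:
        reraise_with_tb(RuntimeError, "wrapped")
except RuntimeError as e:
    assert e.__traceback__ is not None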
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/utils/surrogateescape.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/utils/surrogateescape.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/LICENSE b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/LICENSE
deleted file mode 100644
index ae382866..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
-Httplib2 Software License
-
-Copyright (c) 2006 by Joe Gregorio
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of the Software,
-and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/METADATA
deleted file mode 100644
index 276c4430..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/METADATA
+++ /dev/null
@@ -1,71 +0,0 @@
-Metadata-Version: 2.1
-Name: httplib2
-Version: 0.19.1
-Summary: A comprehensive HTTP client library.
-Home-page: https://github.com/httplib2/httplib2
-Author: Joe Gregorio
-Author-email: joe@bitworking.org
-License: MIT
-Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
-Classifier: Environment :: Web Environment
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Topic :: Internet :: WWW/HTTP
-Classifier: Topic :: Software Development :: Libraries
-Requires-Dist: pyparsing (<3,>=2.4.2)
-
-
-
-A comprehensive HTTP client library, ``httplib2`` supports many features left out of other HTTP libraries.
-
-**HTTP and HTTPS**
- HTTPS support is only available if the socket module was compiled with SSL support.
-
-
-**Keep-Alive**
- Supports HTTP 1.1 Keep-Alive, keeping the socket open and performing multiple requests over the same connection if possible.
-
-
-**Authentication**
- The following three types of HTTP Authentication are supported. These can be used over both HTTP and HTTPS.
-
- * Digest
- * Basic
- * WSSE
-
-**Caching**
- The module can optionally operate with a private cache that understands the Cache-Control:
- header and uses both the ETag and Last-Modified cache validators. Both file system
- and memcached based caches are supported.
-
-
-**All Methods**
- The module can handle any HTTP request method, not just GET and POST.
-
-
-**Redirects**
- Automatically follows 3XX redirects on GETs.
-
-
-**Compression**
- Handles both 'deflate' and 'gzip' types of compression.
-
-
-**Lost update support**
- Automatically adds back ETags into PUT requests to resources we have already cached. This implements Section 3.2 of Detecting the Lost Update Problem Using Unreserved Checkout
-
-
-**Unit Tested**
- A large and growing set of unit tests.
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/RECORD
deleted file mode 100644
index dd4f6146..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/RECORD
+++ /dev/null
@@ -1,13 +0,0 @@
-httplib2-0.19.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-httplib2-0.19.1.dist-info/LICENSE,sha256=WJ7sOPct8r4gNxHTuMvs6bkIxef_ALw8q39juunjZrQ,1086
-httplib2-0.19.1.dist-info/METADATA,sha256=Y773x9o8W64zxHwc9LyJIyAzCAQsnyvWDyrUs7l1l50,2235
-httplib2-0.19.1.dist-info/RECORD,,
-httplib2-0.19.1.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
-httplib2-0.19.1.dist-info/top_level.txt,sha256=BEY8ChKwagUWmu9x8yN9JObJpZKNeWCr1E-sIECb56I,9
-httplib2/__init__.py,sha256=BIXewNb18bHReQm1J-NWasURlkO0UjjEgup2UadKP88,68412
-httplib2/auth.py,sha256=IdJCKqMC2nx7O5wbYwfO04m1X3miYW5JAZ9Wn5eQZi4,2026
-httplib2/cacerts.txt,sha256=AQyadVjp1sEIG0yIiMJ82l52hplPo3odJIyTSS_sONw,135547
-httplib2/certs.py,sha256=guhfjMNhDdKJEyYBb5ZyLxVO5q1I7Y_P-4BG8MniBk8,971
-httplib2/error.py,sha256=GyqPUvZeKdVLq0f3xg0uX4rjtv7jVGJuPerAdyc-jfk,954
-httplib2/iri2uri.py,sha256=PhIzEzeR6C73l7piwrNAJlVvlWgsqxtJTlFeXgznzQo,4153
-httplib2/socks.py,sha256=oaeEOnT2rkTNm6wnn0CSdhWzVaVshnnkAKiP4kxKzzc,19701
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/WHEEL
deleted file mode 100644
index 385faab0..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.36.2)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/top_level.txt
deleted file mode 100644
index fb881ece..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2-0.19.1.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-httplib2
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/__init__.py
old mode 100644
new mode 100755
index 8b240dbf..4312f300
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/__init__.py
@@ -15,7 +15,7 @@
"Alex Yu",
]
__license__ = "MIT"
-__version__ = "0.19.1"
+__version__ = '0.14.0'
import base64
import calendar
@@ -49,8 +49,6 @@
# TODO: remove this fallback and copypasted socksipy module upon py2/3 merge,
# idea is to have soft-dependency on any compatible module called socks
from . import socks
-from . import auth
-from .error import *
from .iri2uri import iri2uri
@@ -81,6 +79,56 @@ def has_timeout(timeout):
RETRIES = 2
+# All exceptions raised here derive from HttpLib2Error
+class HttpLib2Error(Exception):
+ pass
+
+
+# Some exceptions can be caught and optionally
+# be turned back into responses.
+class HttpLib2ErrorWithResponse(HttpLib2Error):
+ def __init__(self, desc, response, content):
+ self.response = response
+ self.content = content
+ HttpLib2Error.__init__(self, desc)
+
+
+class RedirectMissingLocation(HttpLib2ErrorWithResponse):
+ pass
+
+
+class RedirectLimit(HttpLib2ErrorWithResponse):
+ pass
+
+
+class FailedToDecompressContent(HttpLib2ErrorWithResponse):
+ pass
+
+
+class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse):
+ pass
+
+
+class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse):
+ pass
+
+
+class MalformedHeader(HttpLib2Error):
+ pass
+
+
+class RelativeURIError(HttpLib2Error):
+ pass
+
+
+class ServerNotFoundError(HttpLib2Error):
+ pass
+
+
+class ProxiesUnavailableError(HttpLib2Error):
+ pass
+
+
# Open Items:
# -----------
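
The restored exception hierarchy above roots everything in HttpLib2Error, with HttpLib2ErrorWithResponse additionally carrying the response and body so callers can turn some failures back into responses. A hedged, self-contained usage sketch (hypothetical call site, classes re-declared for the example):

class HttpLib2Error(Exception):
    pass

class HttpLib2ErrorWithResponse(HttpLib2Error):
    def __init__(self, desc, response, content):
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)

class RedirectLimit(HttpLib2ErrorWithResponse):
    pass

try:
    raise RedirectLimit("too many redirects", response={"status": 302}, content=b"")
except HttpLib2ErrorWithResponse as e:   # recover the response instead of failing
    assert e.response["status"] == 302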
@@ -113,15 +161,7 @@ def has_timeout(timeout):
"upgrade",
]
-# https://tools.ietf.org/html/rfc7231#section-8.1.3
-SAFE_METHODS = ("GET", "HEAD", "OPTIONS", "TRACE")
-
-# To change, assign to `Http().redirect_codes`
-REDIRECT_CODES = frozenset((300, 301, 302, 303, 307, 308))
-
-
from httplib2 import certs
-
CA_CERTS = certs.where()
# PROTOCOL_TLS is python 3.5.3+. PROTOCOL_SSLv23 is deprecated.
@@ -129,23 +169,21 @@ def has_timeout(timeout):
# > Selects the highest protocol version that both the client and server support.
# > Despite the name, this option can select “TLS” protocols as well as “SSL”.
# source: https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLS
-DEFAULT_TLS_VERSION = getattr(ssl, "PROTOCOL_TLS", None) or getattr(ssl, "PROTOCOL_SSLv23")
-
+DEFAULT_TLS_VERSION = getattr(ssl, "PROTOCOL_TLS", None) or getattr(
+ ssl, "PROTOCOL_SSLv23"
+)
def _build_ssl_context(
- disable_ssl_certificate_validation,
- ca_certs,
- cert_file=None,
- key_file=None,
- maximum_version=None,
- minimum_version=None,
- key_password=None,
+ disable_ssl_certificate_validation, ca_certs, cert_file=None, key_file=None,
+ maximum_version=None, minimum_version=None,
):
if not hasattr(ssl, "SSLContext"):
raise RuntimeError("httplib2 requires Python 3.2+ for ssl.SSLContext")
context = ssl.SSLContext(DEFAULT_TLS_VERSION)
- context.verify_mode = ssl.CERT_NONE if disable_ssl_certificate_validation else ssl.CERT_REQUIRED
+ context.verify_mode = (
+ ssl.CERT_NONE if disable_ssl_certificate_validation else ssl.CERT_REQUIRED
+ )
# SSLContext.maximum_version and SSLContext.minimum_version are python 3.7+.
# source: https://docs.python.org/3/library/ssl.html#ssl.SSLContext.maximum_version
@@ -169,7 +207,7 @@ def _build_ssl_context(
context.load_verify_locations(ca_certs)
if cert_file:
- context.load_cert_chain(cert_file, key_file, key_password)
+ context.load_cert_chain(cert_file, key_file)
return context
@@ -243,7 +281,10 @@ def safename(filename):
def _normalize_headers(headers):
return dict(
[
- (_convert_byte_str(key).lower(), NORMALIZE_SPACE.sub(_convert_byte_str(value), " ").strip(),)
+ (
+ _convert_byte_str(key).lower(),
+ NORMALIZE_SPACE.sub(_convert_byte_str(value), " ").strip(),
+ )
for (key, value) in headers.items()
]
)
@@ -260,9 +301,13 @@ def _parse_cache_control(headers):
if "cache-control" in headers:
parts = headers["cache-control"].split(",")
parts_with_args = [
- tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")
+ tuple([x.strip().lower() for x in part.split("=", 1)])
+ for part in parts
+ if -1 != part.find("=")
+ ]
+ parts_wo_args = [
+ (name.strip().lower(), 1) for name in parts if -1 == name.find("=")
]
- parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
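
The _parse_cache_control hunk above only reflows the code; the algorithm is unchanged: split the header on commas, turn name=value directives into pairs, and map bare directives to 1. A condensed, behaviour-equivalent sketch:

def parse_cache_control(value):
    parts = value.split(",")
    with_args = [tuple(x.strip().lower() for x in p.split("=", 1))
                 for p in parts if "=" in p]      # e.g. max-age=3600
    wo_args = [(p.strip().lower(), 1) for p in parts if "=" not in p]
    return dict(with_args + wo_args)

assert parse_cache_control("no-cache, max-age=3600") == {
    "no-cache": 1, "max-age": "3600"}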
@@ -270,9 +315,56 @@ def _parse_cache_control(headers):
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
-# Set to true to turn on, useful for testing servers.
+# Set to true to turn on, usefull for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
+# In regex below:
+# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
+# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
+# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
+# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?@,;:\\\"/[\]?={} \t]+(?!\"))\"?
+WWW_AUTH_STRICT = re.compile(
+ r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$"
+)
+WWW_AUTH_RELAXED = re.compile(
+ r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(? 0:
# service = "wise"
- auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers["user-agent"],)
+ auth = dict(
+ Email=credentials[0],
+ Passwd=credentials[1],
+ service=service,
+ source=headers["user-agent"],
+ )
resp, content = self.http.request(
"https://www.google.com/accounts/ClientLogin",
method="POST",
@@ -747,7 +909,9 @@ class FileCache(object):
be running on the same cache.
"""
- def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
+ def __init__(
+ self, cache, safe=safename
+ ): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
@@ -796,13 +960,7 @@ class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
- def add(self, key, cert, domain, password):
- self.credentials.append((domain.lower(), key, cert, password))
-
- def iter(self, domain):
- for (cdomain, key, cert, password) in self.credentials:
- if cdomain == "" or domain == cdomain:
- yield (key, cert, password)
+ pass
class AllHosts(object):
@@ -815,7 +973,14 @@ class ProxyInfo(object):
bypass_hosts = ()
def __init__(
- self, proxy_type, proxy_host, proxy_port, proxy_rdns=True, proxy_user=None, proxy_pass=None, proxy_headers=None,
+ self,
+ proxy_type,
+ proxy_host,
+ proxy_port,
+ proxy_rdns=True,
+ proxy_user=None,
+ proxy_pass=None,
+ proxy_headers=None,
):
"""Args:
@@ -834,19 +999,11 @@ def __init__(
proxy_headers: Additional or modified headers for the proxy connect
request.
"""
- if isinstance(proxy_user, bytes):
- proxy_user = proxy_user.decode()
- if isinstance(proxy_pass, bytes):
- proxy_pass = proxy_pass.decode()
- (
- self.proxy_type,
- self.proxy_host,
- self.proxy_port,
- self.proxy_rdns,
- self.proxy_user,
- self.proxy_pass,
- self.proxy_headers,
- ) = (
+ if isinstance(proxy_user, str):
+ proxy_user = proxy_user.encode()
+ if isinstance(proxy_pass, str):
+ proxy_pass = proxy_pass.encode()
+ self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass, self.proxy_headers = (
proxy_type,
proxy_host,
proxy_port,
@@ -980,18 +1137,14 @@ def __init__(self, host, port=None, timeout=None, proxy_info=None):
def connect(self):
"""Connect to the host and port specified in __init__."""
if self.proxy_info and socks is None:
- raise ProxiesUnavailableError("Proxy support missing but proxy use was requested!")
+ raise ProxiesUnavailableError(
+ "Proxy support missing but proxy use was requested!"
+ )
if self.proxy_info and self.proxy_info.isgood() and self.proxy_info.applies_to(self.host):
use_proxy = True
- (
- proxy_type,
- proxy_host,
- proxy_port,
- proxy_rdns,
- proxy_user,
- proxy_pass,
- proxy_headers,
- ) = self.proxy_info.astuple()
+ proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers = (
+ self.proxy_info.astuple()
+ )
host = proxy_host
port = proxy_port
@@ -1010,7 +1163,12 @@ def connect(self):
if use_proxy:
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(
- proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass,
+ proxy_type,
+ proxy_host,
+ proxy_port,
+ proxy_rdns,
+ proxy_user,
+ proxy_pass,
)
else:
self.sock = socket.socket(af, socktype, proto)
@@ -1018,11 +1176,22 @@ def connect(self):
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
if self.debuglevel > 0:
- print("connect: ({0}, {1}) ************".format(self.host, self.port))
+ print(
+ "connect: ({0}, {1}) ************".format(self.host, self.port)
+ )
if use_proxy:
print(
"proxy: {0} ************".format(
- str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
+ str(
+ (
+ proxy_host,
+ proxy_port,
+ proxy_rdns,
+ proxy_user,
+ proxy_pass,
+ proxy_headers,
+ )
+ )
)
)
@@ -1034,7 +1203,16 @@ def connect(self):
if use_proxy:
print(
"proxy: {0}".format(
- str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
+ str(
+ (
+ proxy_host,
+ proxy_port,
+ proxy_rdns,
+ proxy_user,
+ proxy_pass,
+ proxy_headers,
+ )
+ )
)
)
if self.sock:
@@ -1067,7 +1245,6 @@ def __init__(
disable_ssl_certificate_validation=False,
tls_maximum_version=None,
tls_minimum_version=None,
- key_password=None,
):
self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
@@ -1078,34 +1255,25 @@ def __init__(
self.proxy_info = proxy_info("https")
context = _build_ssl_context(
- self.disable_ssl_certificate_validation,
- self.ca_certs,
- cert_file,
- key_file,
- maximum_version=tls_maximum_version,
- minimum_version=tls_minimum_version,
- key_password=key_password,
+ self.disable_ssl_certificate_validation, self.ca_certs, cert_file, key_file,
+ maximum_version=tls_maximum_version, minimum_version=tls_minimum_version,
)
super(HTTPSConnectionWithTimeout, self).__init__(
- host, port=port, timeout=timeout, context=context,
+ host,
+ port=port,
+ key_file=key_file,
+ cert_file=cert_file,
+ timeout=timeout,
+ context=context,
)
- self.key_file = key_file
- self.cert_file = cert_file
- self.key_password = key_password
def connect(self):
"""Connect to a host on a given (SSL) port."""
- if self.proxy_info and self.proxy_info.isgood() and self.proxy_info.applies_to(self.host):
+ if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
- (
- proxy_type,
- proxy_host,
- proxy_port,
- proxy_rdns,
- proxy_user,
- proxy_pass,
- proxy_headers,
- ) = self.proxy_info.astuple()
+ proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers = (
+ self.proxy_info.astuple()
+ )
host = proxy_host
port = proxy_port
@@ -1126,7 +1294,12 @@ def connect(self):
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(
- proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass,
+ proxy_type,
+ proxy_host,
+ proxy_port,
+ proxy_rdns,
+ proxy_user,
+ proxy_pass,
)
else:
sock = socket.socket(family, socktype, proto)
@@ -1138,7 +1311,10 @@ def connect(self):
self.sock = self._context.wrap_socket(sock, server_hostname=self.host)
# Python 3.3 compatibility: emulate the check_hostname behavior
- if not hasattr(self._context, "check_hostname") and not self.disable_ssl_certificate_validation:
+ if (
+ not hasattr(self._context, "check_hostname")
+ and not self.disable_ssl_certificate_validation
+ ):
try:
ssl.match_hostname(self.sock.getpeercert(), self.host)
except Exception:
@@ -1151,7 +1327,16 @@ def connect(self):
if use_proxy:
print(
"proxy: {0}".format(
- str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
+ str(
+ (
+ proxy_host,
+ proxy_port,
+ proxy_rdns,
+ proxy_user,
+ proxy_pass,
+ proxy_headers,
+ )
+ )
)
)
except (ssl.SSLError, ssl.CertificateError) as e:
@@ -1166,11 +1351,20 @@ def connect(self):
except socket.error as e:
socket_err = e
if self.debuglevel > 0:
- print("connect fail: ({0}, {1})".format(self.host, self.port))
+ print("connect fail: ({0}, {1})".format((self.host, self.port)))
if use_proxy:
print(
"proxy: {0}".format(
- str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
+ str(
+ (
+ proxy_host,
+ proxy_port,
+ proxy_rdns,
+ proxy_user,
+ proxy_pass,
+ proxy_headers,
+ )
+ )
)
)
if self.sock:
@@ -1265,14 +1459,10 @@ def __init__(
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
- self.redirect_codes = REDIRECT_CODES
-
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT", "PATCH"]
- self.safe_methods = list(SAFE_METHODS)
-
# If 'follow_redirects' is True, and this is set to True then
# all redirecs are followed, including unsafe ones.
self.follow_all_redirects = False
@@ -1286,16 +1476,6 @@ def __init__(
# Keep Authorization: headers on a redirect.
self.forward_authorization_headers = False
- def close(self):
- """Close persistent connections, clear sensitive data.
- Not thread-safe, requires external synchronization against concurrent requests.
- """
- existing, self.connections = self.connections, {}
- for _, c in existing.items():
- c.close()
- self.certificates.clear()
- self.clear_credentials()
-
def __getstate__(self):
state_dict = copy.copy(self.__dict__)
# In case request is augmented by some foreign object such as
@@ -1314,21 +1494,23 @@ def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
- challenges = auth._parse_www_authenticate(response, "www-authenticate")
+ challenges = _parse_www_authenticate(response, "www-authenticate")
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if scheme in challenges:
- yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
+ yield AUTH_SCHEME_CLASSES[scheme](
+ cred, host, request_uri, headers, response, content, self
+ )
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
- def add_certificate(self, key, cert, domain, password=None):
+ def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
- self.certificates.add(key, cert, domain, password)
+ self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
@@ -1352,7 +1534,9 @@ def _conn_request(self, conn, request_uri, method, body, headers):
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except socket.error as e:
- errno_ = e.args[0].errno if isinstance(e.args[0], socket.error) else e.errno
+ errno_ = (
+ e.args[0].errno if isinstance(e.args[0], socket.error) else e.errno
+ )
if errno_ in (errno.ENETUNREACH, errno.EADDRNOTAVAIL) and i < RETRIES:
continue # retry on potentially transient errors
raise
@@ -1411,49 +1595,80 @@ def _conn_request(self, conn, request_uri, method, body, headers):
return (response, content)
def _request(
- self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey,
+ self,
+ conn,
+ host,
+ absolute_uri,
+ request_uri,
+ method,
+ body,
+ headers,
+ redirections,
+ cachekey,
):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
- auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
+ auths = [
+ (auth.depth(request_uri), auth)
+ for auth in self.authorizations
+ if auth.inscope(host, request_uri)
+ ]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
- (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+ (response, content) = self._conn_request(
+ conn, request_uri, method, body, headers
+ )
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
- (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+ (response, content) = self._conn_request(
+ conn, request_uri, method, body, headers
+ )
response._stale_digest = 1
if response.status == 401:
- for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
+ for authorization in self._auth_from_challenge(
+ host, request_uri, headers, response, content
+ ):
authorization.request(method, request_uri, headers, body)
- (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+ (response, content) = self._conn_request(
+ conn, request_uri, method, body, headers
+ )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
- if self.follow_all_redirects or method in self.safe_methods or response.status in (303, 308):
- if self.follow_redirects and response.status in self.redirect_codes:
+ if (
+ self.follow_all_redirects
+ or (method in ["GET", "HEAD"])
+ or response.status == 303
+ ):
+ if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if "location" not in response and response.status != 300:
raise RedirectMissingLocation(
- _("Redirected but the response is missing a Location: header."), response, content,
+ _(
+ "Redirected but the response is missing a Location: header."
+ ),
+ response,
+ content,
)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if "location" in response:
location = response["location"]
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
- response["location"] = urllib.parse.urljoin(absolute_uri, location)
- if response.status == 308 or (response.status == 301 and (method in self.safe_methods)):
+ response["location"] = urllib.parse.urljoin(
+ absolute_uri, location
+ )
+ if response.status == 301 and method in ["GET", "HEAD"]:
response["-x-permanent-redirect-url"] = response["location"]
if "content-location" not in response:
response["content-location"] = absolute_uri
@@ -1462,7 +1677,10 @@ def _request(
del headers["if-none-match"]
if "if-modified-since" in headers:
del headers["if-modified-since"]
- if "authorization" in headers and not self.forward_authorization_headers:
+ if (
+ "authorization" in headers
+ and not self.forward_authorization_headers
+ ):
del headers["authorization"]
if "location" in response:
location = response["location"]
@@ -1474,14 +1692,20 @@ def _request(
redirect_method = "GET"
body = None
(response, content) = self.request(
- location, method=redirect_method, body=body, headers=headers, redirections=redirections - 1,
+ location,
+ method=redirect_method,
+ body=body,
+ headers=headers,
+ redirections=redirections - 1,
)
response.previous = old_response
else:
raise RedirectLimit(
- "Redirected more times than redirection_limit allows.", response, content,
+ "Redirected more times than redirection_limit allows.",
+ response,
+ content,
)
- elif response.status in [200, 203] and method in self.safe_methods:
+ elif response.status in [200, 203] and method in ["GET", "HEAD"]:
# Don't cache 206's since we aren't going to handle byte range requests
if "content-location" not in response:
response["content-location"] = absolute_uri
@@ -1497,7 +1721,13 @@ def _normalize_headers(self, headers):
# including all socket.* and httplib.* exceptions.
def request(
- self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None,
+ self,
+ uri,
+ method="GET",
+ body=None,
+ headers=None,
+ redirections=DEFAULT_MAX_REDIRECTS,
+ connection_type=None,
):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin
@@ -1519,7 +1749,7 @@ def request(
        being an instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
- conn_key = ""
+ conn_key = ''
try:
if headers is None:
@@ -1531,9 +1761,6 @@ def request(
headers["user-agent"] = "Python-httplib2/%s (gzip)" % __version__
uri = iri2uri(uri)
- # Prevent CWE-75 space injection to manipulate request via part of uri.
- # Prevent CWE-93 CRLF injection to modify headers via part of uri.
- uri = uri.replace(" ", "%20").replace("\r", "%0D").replace("\n", "%0A")
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
@@ -1555,7 +1782,6 @@ def request(
disable_ssl_certificate_validation=self.disable_ssl_certificate_validation,
tls_maximum_version=self.tls_maximum_version,
tls_minimum_version=self.tls_minimum_version,
- key_password=certs[0][2],
)
else:
conn = self.connections[conn_key] = connection_type(
@@ -1577,7 +1803,6 @@ def request(
headers["accept-encoding"] = "gzip, deflate"
info = email.message.Message()
- cachekey = None
cached_value = None
if self.cache:
cachekey = defrag_uri
@@ -1588,11 +1813,15 @@ def request(
info = email.message_from_bytes(info)
for k, v in info.items():
if v.startswith("=?") and v.endswith("?="):
- info.replace_header(k, str(*email.header.decode_header(v)[0]))
+ info.replace_header(
+ k, str(*email.header.decode_header(v)[0])
+ )
except (IndexError, ValueError):
self.cache.delete(cachekey)
cachekey = None
cached_value = None
+ else:
+ cachekey = None
if (
method in self.optimistic_concurrency_methods
@@ -1604,15 +1833,13 @@ def request(
# http://www.w3.org/1999/04/Editing/
headers["if-match"] = info["etag"]
- # https://tools.ietf.org/html/rfc7234
- # A cache MUST invalidate the effective Request URI as well as [...] Location and Content-Location
- # when a non-error status code is received in response to an unsafe request method.
- if self.cache and cachekey and method not in self.safe_methods:
+ if method not in ["GET", "HEAD"] and self.cache and cachekey:
+ # RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
- if method in self.safe_methods and "vary" in info:
+ if method in ["GET", "HEAD"] and "vary" in info:
vary = info["vary"]
vary_headers = vary.lower().replace(" ", "").split(",")
for header in vary_headers:
@@ -1623,23 +1850,22 @@ def request(
break
if (
- self.cache
- and cached_value
- and (method in self.safe_methods or info["status"] == "308")
+ cached_value
+ and method in ["GET", "HEAD"]
+ and self.cache
and "range" not in headers
):
- redirect_method = method
- if info["status"] not in ("307", "308"):
- redirect_method = "GET"
if "-x-permanent-redirect-url" in info:
# Should cached permanent redirects be counted in our redirection count? For now, yes.
if redirections <= 0:
raise RedirectLimit(
- "Redirected more times than redirection_limit allows.", {}, "",
+ "Redirected more times than redirection_limit allows.",
+ {},
+ "",
)
(response, new_content) = self.request(
info["-x-permanent-redirect-url"],
- method=redirect_method,
+ method="GET",
headers=headers,
redirections=redirections - 1,
)
@@ -1666,7 +1892,11 @@ def request(
return (response, content)
if entry_disposition == "STALE":
- if "etag" in info and not self.ignore_etag and not "if-none-match" in headers:
+ if (
+ "etag" in info
+ and not self.ignore_etag
+ and not "if-none-match" in headers
+ ):
headers["if-none-match"] = info["etag"]
if "last-modified" in info and not "last-modified" in headers:
headers["if-modified-since"] = info["last-modified"]
@@ -1674,7 +1904,15 @@ def request(
pass
(response, new_content) = self._request(
- conn, authority, uri, request_uri, method, body, headers, redirections, cachekey,
+ conn,
+ authority,
+ uri,
+ request_uri,
+ method,
+ body,
+ headers,
+ redirections,
+ cachekey,
)
if response.status == 304 and method == "GET":
@@ -1688,7 +1926,9 @@ def request(
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
- _updateCache(headers, merged_response, content, self.cache, cachekey)
+ _updateCache(
+ headers, merged_response, content, self.cache, cachekey
+ )
response = merged_response
response.status = 200
response.fromcache = True
@@ -1706,7 +1946,15 @@ def request(
content = b""
else:
(response, content) = self._request(
- conn, authority, uri, request_uri, method, body, headers, redirections, cachekey,
+ conn,
+ authority,
+ uri,
+ request_uri,
+ method,
+ body,
+ headers,
+ redirections,
+ cachekey,
)
except Exception as e:
is_timeout = isinstance(e, socket.timeout)
@@ -1723,11 +1971,23 @@ def request(
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = b"Request Timeout"
- response = Response({"content-type": "text/plain", "status": "408", "content-length": len(content),})
+ response = Response(
+ {
+ "content-type": "text/plain",
+ "status": "408",
+ "content-length": len(content),
+ }
+ )
response.reason = "Request Timeout"
else:
content = str(e).encode("utf-8")
- response = Response({"content-type": "text/plain", "status": "400", "content-length": len(content),})
+ response = Response(
+ {
+ "content-type": "text/plain",
+ "status": "400",
+ "content-length": len(content),
+ }
+ )
response.reason = "Bad Request"
else:
raise
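
Note on the hunks above: they revert httplib2's redirect and cache gating from the
configurable ``self.safe_methods`` / ``self.redirect_codes`` attributes back to
hardcoded method and status lists, and also drop 308 handling, the URI CRLF/space
sanitisation, and the ``key_password`` argument to ``add_certificate``. A minimal
sketch of the reverted redirect rule (the standalone names below are illustrative,
not httplib2 API):

.. code-block:: python

    SAFE_METHODS = ["GET", "HEAD"]
    REDIRECT_CODES = [300, 301, 302, 303, 307]

    def should_handle_redirect(method, status,
                               follow_redirects=True, follow_all_redirects=False):
        # Outer gate from the hunk: safe method, explicit 303, or follow-all.
        eligible = follow_all_redirects or method in SAFE_METHODS or status == 303
        # Inner gate: redirects enabled and a recognised redirect status.
        return eligible and follow_redirects and status in REDIRECT_CODES

    assert should_handle_redirect("GET", 301)
    assert not should_handle_redirect("POST", 301)  # unsafe method, not a 303
    assert not should_handle_redirect("GET", 308)   # 308 support is dropped here
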
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/auth.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/auth.py
deleted file mode 100644
index 84b58317..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/auth.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import base64
-import re
-
-import pyparsing as pp
-
-from .error import *
-
-UNQUOTE_PAIRS = re.compile(r"\\(.)")
-unquote = lambda s, l, t: UNQUOTE_PAIRS.sub(r"\1", t[0][1:-1])
-
-# https://tools.ietf.org/html/rfc7235#section-1.2
-# https://tools.ietf.org/html/rfc7235#appendix-B
-tchar = "!#$%&'*+-.^_`|~" + pp.nums + pp.alphas
-token = pp.Word(tchar).setName("token")
-token68 = pp.Combine(pp.Word("-._~+/" + pp.nums + pp.alphas) + pp.Optional(pp.Word("=").leaveWhitespace())).setName(
- "token68"
-)
-
-quoted_string = pp.dblQuotedString.copy().setName("quoted-string").setParseAction(unquote)
-auth_param_name = token.copy().setName("auth-param-name").addParseAction(pp.downcaseTokens)
-auth_param = auth_param_name + pp.Suppress("=") + (quoted_string | token)
-params = pp.Dict(pp.delimitedList(pp.Group(auth_param)))
-
-scheme = token("scheme")
-challenge = scheme + (params("params") | token68("token"))
-
-authentication_info = params.copy()
-www_authenticate = pp.delimitedList(pp.Group(challenge))
-
-
-def _parse_authentication_info(headers, headername="authentication-info"):
- """https://tools.ietf.org/html/rfc7615
- """
- header = headers.get(headername, "").strip()
- if not header:
- return {}
- try:
- parsed = authentication_info.parseString(header)
- except pp.ParseException as ex:
- # print(ex.explain(ex))
- raise MalformedHeader(headername)
-
- return parsed.asDict()
-
-
-def _parse_www_authenticate(headers, headername="www-authenticate"):
- """Returns a dictionary of dictionaries, one dict per auth_scheme."""
- header = headers.get(headername, "").strip()
- if not header:
- return {}
- try:
- parsed = www_authenticate.parseString(header)
- except pp.ParseException as ex:
- # print(ex.explain(ex))
- raise MalformedHeader(headername)
-
- retval = {
- challenge["scheme"].lower(): challenge["params"].asDict()
- if "params" in challenge
- else {"token": challenge.get("token")}
- for challenge in parsed
- }
- return retval
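
Deleting ``auth.py`` removes the pyparsing-based RFC 7235 grammar; after the
revert, ``_auth_from_challenge`` calls a ``_parse_www_authenticate`` defined in
``__init__.py`` instead (see the earlier hunk). For context, a deliberately
simplified stand-in showing the output shape the removed parser produced — a
dict of params keyed by lowercase scheme. This toy version handles only one
challenge with ``key="value"`` params and is nowhere near RFC 7235 complete:

.. code-block:: python

    import re

    def parse_www_authenticate_sketch(value):
        """Toy approximation of the deleted grammar; illustrative only."""
        scheme, _, rest = value.partition(" ")
        params = dict(re.findall(r'(\w+)="([^"]*)"', rest))
        return {scheme.lower(): params}

    print(parse_www_authenticate_sketch('Digest realm="example", qop="auth"'))
    # {'digest': {'realm': 'example', 'qop': 'auth'}}
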
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/certs.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/certs.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/error.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/error.py
deleted file mode 100644
index 0e68c12a..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/error.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# All exceptions raised here derive from HttpLib2Error
-class HttpLib2Error(Exception):
- pass
-
-
-# Some exceptions can be caught and optionally
-# be turned back into responses.
-class HttpLib2ErrorWithResponse(HttpLib2Error):
- def __init__(self, desc, response, content):
- self.response = response
- self.content = content
- HttpLib2Error.__init__(self, desc)
-
-
-class RedirectMissingLocation(HttpLib2ErrorWithResponse):
- pass
-
-
-class RedirectLimit(HttpLib2ErrorWithResponse):
- pass
-
-
-class FailedToDecompressContent(HttpLib2ErrorWithResponse):
- pass
-
-
-class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse):
- pass
-
-
-class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse):
- pass
-
-
-class MalformedHeader(HttpLib2Error):
- pass
-
-
-class RelativeURIError(HttpLib2Error):
- pass
-
-
-class ServerNotFoundError(HttpLib2Error):
- pass
-
-
-class ProxiesUnavailableError(HttpLib2Error):
- pass
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/iri2uri.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/iri2uri.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/socks.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/socks.py
old mode 100644
new mode 100755
index cc68e634..2926b4e5
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/socks.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/httplib2/socks.py
@@ -238,15 +238,7 @@ def setproxy(
headers - Additional or modified headers for the proxy connect
request.
"""
- self.__proxy = (
- proxytype,
- addr,
- port,
- rdns,
- username.encode() if username else None,
- password.encode() if password else None,
- headers,
- )
+ self.__proxy = (proxytype, addr, port, rdns, username, password, headers)
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/LICENSE.md b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/LICENSE.md
deleted file mode 100644
index b6f87326..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/LICENSE.md
+++ /dev/null
@@ -1,29 +0,0 @@
-BSD 3-Clause License
-
-Copyright (c) 2013-2021, Kim Davies
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/METADATA
deleted file mode 100644
index 6446805d..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/METADATA
+++ /dev/null
@@ -1,236 +0,0 @@
-Metadata-Version: 2.1
-Name: idna
-Version: 3.3
-Summary: Internationalized Domain Names in Applications (IDNA)
-Home-page: https://github.com/kjd/idna
-Author: Kim Davies
-Author-email: kim@cynosure.com.au
-License: BSD-3-Clause
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: Intended Audience :: System Administrators
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Classifier: Topic :: Internet :: Name Service (DNS)
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Topic :: Utilities
-Requires-Python: >=3.5
-License-File: LICENSE.md
-
-Internationalized Domain Names in Applications (IDNA)
-=====================================================
-
-Support for the Internationalised Domain Names in Applications
-(IDNA) protocol as specified in `RFC 5891 <https://tools.ietf.org/html/rfc5891>`_.
-This is the latest version of the protocol and is sometimes referred to as
-“IDNA 2008”.
-
-This library also provides support for Unicode Technical Standard 46,
-`Unicode IDNA Compatibility Processing <https://unicode.org/reports/tr46/>`_.
-
-This acts as a suitable replacement for the “encodings.idna” module that
-comes with the Python standard library, but which only supports the
-older superseded IDNA specification (`RFC 3490 <https://tools.ietf.org/html/rfc3490>`_).
-
-Basic functions are simply executed:
-
-.. code-block:: pycon
-
- >>> import idna
- >>> idna.encode('ドメイン.テスト')
- b'xn--eckwd4c7c.xn--zckzah'
- >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah'))
- ドメイン.テスト
-
-
-Installation
-------------
-
-To install this library, you can use pip:
-
-.. code-block:: bash
-
- $ pip install idna
-
-Alternatively, you can install the package using the bundled setup script:
-
-.. code-block:: bash
-
- $ python setup.py install
-
-
-Usage
------
-
-For typical usage, the ``encode`` and ``decode`` functions will take a domain
-name argument and perform a conversion to A-labels or U-labels respectively.
-
-.. code-block:: pycon
-
- >>> import idna
- >>> idna.encode('ドメイン.テスト')
- b'xn--eckwd4c7c.xn--zckzah'
- >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah'))
- ドメイン.テスト
-
-You may use the codec encoding and decoding methods using the
-``idna.codec`` module:
-
-.. code-block:: pycon
-
- >>> import idna.codec
- >>> print('домен.испытание'.encode('idna'))
- b'xn--d1acufc.xn--80akhbyknj4f'
- >>> print(b'xn--d1acufc.xn--80akhbyknj4f'.decode('idna'))
- домен.испытание
-
-Conversions can be applied at a per-label basis using the ``ulabel`` or ``alabel``
-functions if necessary:
-
-.. code-block:: pycon
-
- >>> idna.alabel('测试')
- b'xn--0zwm56d'
-
-Compatibility Mapping (UTS #46)
-+++++++++++++++++++++++++++++++
-
-As described in `RFC 5895 <https://tools.ietf.org/html/rfc5895>`_, the IDNA
-specification does not normalize input from different potential ways a user
-may input a domain name. This functionality, known as a “mapping”, is
-considered by the specification to be a local user-interface issue distinct
-from IDNA conversion functionality.
-
-This library provides one such mapping, that was developed by the Unicode
-Consortium. Known as `Unicode IDNA Compatibility Processing <https://unicode.org/reports/tr46/>`_,
-it provides for both a regular mapping for typical applications, as well as
-a transitional mapping to help migrate from older IDNA 2003 applications.
-
-For example, “Königsgäßchen” is not a permissible label as *LATIN CAPITAL
-LETTER K* is not allowed (nor are capital letters in general). UTS 46 will
-convert this into lower case prior to applying the IDNA conversion.
-
-.. code-block:: pycon
-
- >>> import idna
- >>> idna.encode('Königsgäßchen')
- ...
- idna.core.InvalidCodepoint: Codepoint U+004B at position 1 of 'Königsgäßchen' not allowed
- >>> idna.encode('Königsgäßchen', uts46=True)
- b'xn--knigsgchen-b4a3dun'
- >>> print(idna.decode('xn--knigsgchen-b4a3dun'))
- königsgäßchen
-
-Transitional processing provides conversions to help transition from the older
-2003 standard to the current standard. For example, in the original IDNA
-specification, the *LATIN SMALL LETTER SHARP S* (ß) was converted into two
-*LATIN SMALL LETTER S* (ss), whereas in the current IDNA specification this
-conversion is not performed.
-
-.. code-block:: pycon
-
- >>> idna.encode('Königsgäßchen', uts46=True, transitional=True)
- 'xn--knigsgsschen-lcb0w'
-
-Implementors should use transitional processing with caution, only in rare
-cases where conversion from legacy labels to current labels must be performed
-(i.e. IDNA implementations that pre-date 2008). For typical applications
-that just need to convert labels, transitional processing is unlikely to be
-beneficial and could produce unexpected incompatible results.
-
-``encodings.idna`` Compatibility
-++++++++++++++++++++++++++++++++
-
-Function calls from the Python built-in ``encodings.idna`` module are
-mapped to their IDNA 2008 equivalents using the ``idna.compat`` module.
-Simply substitute the ``import`` clause in your code to refer to the
-new module name.
-
-Exceptions
-----------
-
-All errors raised during the conversion following the specification should
-raise an exception derived from the ``idna.IDNAError`` base class.
-
-More specific exceptions may be generated, such as ``idna.IDNABidiError``
-when the error reflects an illegal combination of left-to-right and
-right-to-left characters in a label; ``idna.InvalidCodepoint`` when
-a specific codepoint is an illegal character in an IDN label (i.e.
-INVALID); and ``idna.InvalidCodepointContext`` when the codepoint is
-illegal based on its positional context (i.e. it is CONTEXTO or CONTEXTJ
-but the contextual requirements are not satisfied.)
-
-Building and Diagnostics
-------------------------
-
-The IDNA and UTS 46 functionality relies upon pre-calculated lookup
-tables for performance. These tables are derived from computing against
-eligibility criteria in the respective standards. These tables are
-computed using the command-line script ``tools/idna-data``.
-
-This tool will fetch relevant codepoint data from the Unicode repository
-and perform the required calculations to identify eligibility. There are
-three main modes:
-
-* ``idna-data make-libdata``. Generates ``idnadata.py`` and ``uts46data.py``,
-  the pre-calculated lookup tables used for IDNA and UTS 46 conversions. Implementors
- who wish to track this library against a different Unicode version may use this tool
- to manually generate a different version of the ``idnadata.py`` and ``uts46data.py``
- files.
-
-* ``idna-data make-table``. Generate a table of the IDNA disposition
- (e.g. PVALID, CONTEXTJ, CONTEXTO) in the format found in Appendix B.1 of RFC
-  5892 and the pre-computed tables published by `IANA <https://www.iana.org/assignments/idna-tables/>`_.
-
-* ``idna-data U+0061``. Prints debugging output on the various properties
- associated with an individual Unicode codepoint (in this case, U+0061), that are
- used to assess the IDNA and UTS 46 status of a codepoint. This is helpful in debugging
- or analysis.
-
-The tool accepts a number of arguments, described using ``idna-data -h``. Most notably,
-the ``--version`` argument allows the specification of the version of Unicode to use
-in computing the table data. For example, ``idna-data --version 9.0.0 make-libdata``
-will generate library data against Unicode 9.0.0.
-
-
-Additional Notes
-----------------
-
-* **Packages**. The latest tagged release version is published in the
-  `Python Package Index <https://pypi.org/project/idna/>`_.
-
-* **Version support**. This library supports Python 3.5 and higher. As this library
- serves as a low-level toolkit for a variety of applications, many of which strive
- for broad compatibility with older Python versions, there is no rush to remove
-  older interpreter support. Removing support for older versions should be well
- justified in that the maintenance burden has become too high.
-
-* **Python 2**. Python 2 is supported by version 2.x of this library. While active
- development of the version 2.x series has ended, notable issues being corrected
- may be backported to 2.x. Use "idna<3" in your requirements file if you need this
- library for a Python 2 application.
-
-* **Testing**. The library has a test suite based on each rule of the IDNA specification, as
- well as tests that are provided as part of the Unicode Technical Standard 46,
-  `Unicode IDNA Compatibility Processing <https://unicode.org/reports/tr46/>`_.
-
-* **Emoji**. It is an occasional request to support emoji domains in this library. Encoding
- of symbols like emoji is expressly prohibited by the technical standard IDNA 2008 and
- emoji domains are broadly phased out across the domain industry due to associated security
-  risks. For now, applications that need to support these non-compliant labels may
-  wish to consider trying the encode/decode operation in this library first, and then falling
-  back to using `encodings.idna`. See `the Github project <https://github.com/kjd/idna>`_
-  for more discussion.
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/RECORD
deleted file mode 100644
index 79e2d8e4..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/RECORD
+++ /dev/null
@@ -1,15 +0,0 @@
-idna-3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-idna-3.3.dist-info/LICENSE.md,sha256=otbk2UC9JNvnuWRc3hmpeSzFHbeuDVrNMBrIYMqj6DY,1523
-idna-3.3.dist-info/METADATA,sha256=BdqiAf8ou4x1nzIHp2_sDfXWjl7BrSUGpOeVzbYHQuQ,9765
-idna-3.3.dist-info/RECORD,,
-idna-3.3.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
-idna-3.3.dist-info/top_level.txt,sha256=jSag9sEDqvSPftxOQy-ABfGV_RSy7oFh4zZJpODV8k0,5
-idna/__init__.py,sha256=KJQN1eQBr8iIK5SKrJ47lXvxG0BJ7Lm38W4zT0v_8lk,849
-idna/codec.py,sha256=6ly5odKfqrytKT9_7UrlGklHnf1DSK2r9C6cSM4sa28,3374
-idna/compat.py,sha256=0_sOEUMT4CVw9doD3vyRhX80X19PwqFoUBs7gWsFME4,321
-idna/core.py,sha256=RFIkY-HhFZaDoBEFjGwyGd_vWI04uOAQjnzueMWqwOU,12795
-idna/idnadata.py,sha256=fzMzkCea2xieVxcrjngJ-2pLsKQNejPCZFlBajIuQdw,44025
-idna/intranges.py,sha256=YBr4fRYuWH7kTKS2tXlFjM24ZF1Pdvcir-aywniInqg,1881
-idna/package_data.py,sha256=szxQhV0ZD0nKJ84Kuobw3l8q4_KeCyXjFRdpwIpKZmw,21
-idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-idna/uts46data.py,sha256=o-D7V-a0fOLZNd7tvxof6MYfUd0TBZzE2bLR5XO67xU,204400
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/WHEEL
deleted file mode 100644
index 5bad85fd..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.37.0)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/top_level.txt
deleted file mode 100644
index c40472e6..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna-3.3.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-idna
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/__init__.py
old mode 100644
new mode 100755
index a40eeafc..847bf935
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/__init__.py
@@ -1,44 +1,2 @@
from .package_data import __version__
-from .core import (
- IDNABidiError,
- IDNAError,
- InvalidCodepoint,
- InvalidCodepointContext,
- alabel,
- check_bidi,
- check_hyphen_ok,
- check_initial_combiner,
- check_label,
- check_nfc,
- decode,
- encode,
- ulabel,
- uts46_remap,
- valid_contextj,
- valid_contexto,
- valid_label_length,
- valid_string_length,
-)
-from .intranges import intranges_contain
-
-__all__ = [
- "IDNABidiError",
- "IDNAError",
- "InvalidCodepoint",
- "InvalidCodepointContext",
- "alabel",
- "check_bidi",
- "check_hyphen_ok",
- "check_initial_combiner",
- "check_label",
- "check_nfc",
- "decode",
- "encode",
- "intranges_contain",
- "ulabel",
- "uts46_remap",
- "valid_contextj",
- "valid_contexto",
- "valid_label_length",
- "valid_string_length",
-]
+from .core import *
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/codec.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/codec.py
old mode 100644
new mode 100755
index 1ca9ba62..98c65ead
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/codec.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/codec.py
@@ -1,40 +1,41 @@
from .core import encode, decode, alabel, ulabel, IDNAError
import codecs
import re
-from typing import Tuple, Optional
-_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
+_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
class Codec(codecs.Codec):
- def encode(self, data: str, errors: str = 'strict') -> Tuple[bytes, int]:
+ def encode(self, data, errors='strict'):
+
if errors != 'strict':
- raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
+ raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
- return b"", 0
+ return "", 0
return encode(data), len(data)
- def decode(self, data: bytes, errors: str = 'strict') -> Tuple[str, int]:
+ def decode(self, data, errors='strict'):
+
if errors != 'strict':
- raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
+ raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
- return '', 0
+ return u"", 0
return decode(data), len(data)
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
- def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore
+ def _buffer_encode(self, data, errors, final):
if errors != 'strict':
- raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
+ raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
- return "", 0
+ return ("", 0)
labels = _unicode_dots_re.split(data)
- trailing_dot = ''
+ trailing_dot = u''
if labels:
if not labels[-1]:
trailing_dot = '.'
@@ -54,29 +55,37 @@ def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[str, int]
size += len(label)
# Join with U+002E
- result_str = '.'.join(result) + trailing_dot # type: ignore
+ result = ".".join(result) + trailing_dot
size += len(trailing_dot)
- return result_str, size
+ return (result, size)
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
- def _buffer_decode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore
+ def _buffer_decode(self, data, errors, final):
if errors != 'strict':
- raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
+ raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
if not data:
- return ('', 0)
-
- labels = _unicode_dots_re.split(data)
- trailing_dot = ''
+ return (u"", 0)
+
+ # IDNA allows decoding to operate on Unicode strings, too.
+ if isinstance(data, unicode):
+ labels = _unicode_dots_re.split(data)
+ else:
+ # Must be ASCII string
+ data = str(data)
+ unicode(data, "ascii")
+ labels = data.split(".")
+
+ trailing_dot = u''
if labels:
if not labels[-1]:
- trailing_dot = '.'
+ trailing_dot = u'.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
- trailing_dot = '.'
+ trailing_dot = u'.'
result = []
size = 0
@@ -86,25 +95,22 @@ def _buffer_decode(self, data: str, errors: str, final: bool) -> Tuple[str, int]
size += 1
size += len(label)
- result_str = '.'.join(result) + trailing_dot
+ result = u".".join(result) + trailing_dot
size += len(trailing_dot)
- return (result_str, size)
+ return (result, size)
class StreamWriter(Codec, codecs.StreamWriter):
pass
-
class StreamReader(Codec, codecs.StreamReader):
pass
-
-def getregentry() -> codecs.CodecInfo:
- # Compatibility as a search_function for codecs.register()
+def getregentry():
return codecs.CodecInfo(
name='idna',
- encode=Codec().encode, # type: ignore
- decode=Codec().decode, # type: ignore
+ encode=Codec().encode,
+ decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
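
Two notes on the reverted codec. First, the restored ``_buffer_decode`` branch
references ``unicode``, a Python 2 builtin; in this ``aob_py3`` tree that path
would raise ``NameError`` unless something aliases it, so the incremental
decoder is effectively Python-2-only here. Second, the ``Codec`` class is
usable directly; a quick sketch, with round-trip values taken from the idna
README above:

.. code-block:: python

    from idna.codec import Codec

    codec = Codec()
    print(codec.encode('ドメイン.テスト'))
    # (b'xn--eckwd4c7c.xn--zckzah', 8)
    print(codec.decode(b'xn--eckwd4c7c.xn--zckzah'))
    # ('ドメイン.テスト', 24)
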
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/compat.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/compat.py
old mode 100644
new mode 100755
index 786e6bda..4d47f336
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/compat.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/compat.py
@@ -1,13 +1,12 @@
from .core import *
from .codec import *
-from typing import Any, Union
-def ToASCII(label: str) -> bytes:
+def ToASCII(label):
return encode(label)
-def ToUnicode(label: Union[bytes, bytearray]) -> str:
+def ToUnicode(label):
return decode(label)
-def nameprep(s: Any) -> None:
- raise NotImplementedError('IDNA 2008 does not utilise nameprep protocol')
+def nameprep(s):
+ raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol")
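
The ``compat`` shims keep working after the type annotations are stripped;
``ToASCII`` / ``ToUnicode`` simply delegate to ``encode`` / ``decode``. A small
usage sketch (expected values follow from the README examples above):

.. code-block:: python

    from idna.compat import ToASCII, ToUnicode

    print(ToASCII('ドメイン'))          # b'xn--eckwd4c7c'
    print(ToUnicode(b'xn--eckwd4c7c'))  # 'ドメイン'
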
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/core.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/core.py
old mode 100644
new mode 100755
index 55ab9678..104624ad
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/core.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/core.py
@@ -2,12 +2,16 @@
import bisect
import unicodedata
import re
-from typing import Union, Optional
+import sys
from .intranges import intranges_contain
_virama_combining_class = 9
_alabel_prefix = b'xn--'
-_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
+_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
+
+if sys.version_info[0] == 3:
+ unicode = str
+ unichr = chr
class IDNAError(UnicodeError):
""" Base exception for all IDNA-encoding related problems """
@@ -29,43 +33,46 @@ class InvalidCodepointContext(IDNAError):
pass
-def _combining_class(cp: int) -> int:
- v = unicodedata.combining(chr(cp))
+def _combining_class(cp):
+ v = unicodedata.combining(unichr(cp))
if v == 0:
- if not unicodedata.name(chr(cp)):
- raise ValueError('Unknown character in unicodedata')
+ if not unicodedata.name(unichr(cp)):
+ raise ValueError("Unknown character in unicodedata")
return v
-def _is_script(cp: str, script: str) -> bool:
+def _is_script(cp, script):
return intranges_contain(ord(cp), idnadata.scripts[script])
-def _punycode(s: str) -> bytes:
+def _punycode(s):
return s.encode('punycode')
-def _unot(s: int) -> str:
- return 'U+{:04X}'.format(s)
+def _unot(s):
+ return 'U+{0:04X}'.format(s)
+
+def valid_label_length(label):
-def valid_label_length(label: Union[bytes, str]) -> bool:
if len(label) > 63:
return False
return True
-def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool:
+def valid_string_length(label, trailing_dot):
+
if len(label) > (254 if trailing_dot else 253):
return False
return True
-def check_bidi(label: str, check_ltr: bool = False) -> bool:
+def check_bidi(label, check_ltr=False):
+
# Bidi rules should only be applied if string contains RTL characters
bidi_label = False
for (idx, cp) in enumerate(label, 1):
direction = unicodedata.bidirectional(cp)
if direction == '':
# String likely comes from a newer version of Unicode
- raise IDNABidiError('Unknown directionality in label {} at position {}'.format(repr(label), idx))
+ raise IDNABidiError('Unknown directionality in label {0} at position {1}'.format(repr(label), idx))
if direction in ['R', 'AL', 'AN']:
bidi_label = True
if not bidi_label and not check_ltr:
@@ -78,17 +85,17 @@ def check_bidi(label: str, check_ltr: bool = False) -> bool:
elif direction == 'L':
rtl = False
else:
- raise IDNABidiError('First codepoint in label {} must be directionality L, R or AL'.format(repr(label)))
+ raise IDNABidiError('First codepoint in label {0} must be directionality L, R or AL'.format(repr(label)))
valid_ending = False
- number_type = None # type: Optional[str]
+ number_type = False
for (idx, cp) in enumerate(label, 1):
direction = unicodedata.bidirectional(cp)
if rtl:
# Bidi rule 2
if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
- raise IDNABidiError('Invalid direction for codepoint at position {} in a right-to-left label'.format(idx))
+ raise IDNABidiError('Invalid direction for codepoint at position {0} in a right-to-left label'.format(idx))
# Bidi rule 3
if direction in ['R', 'AL', 'EN', 'AN']:
valid_ending = True
@@ -104,7 +111,7 @@ def check_bidi(label: str, check_ltr: bool = False) -> bool:
else:
# Bidi rule 5
if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
- raise IDNABidiError('Invalid direction for codepoint at position {} in a left-to-right label'.format(idx))
+ raise IDNABidiError('Invalid direction for codepoint at position {0} in a left-to-right label'.format(idx))
# Bidi rule 6
if direction in ['L', 'EN']:
valid_ending = True
@@ -117,13 +124,15 @@ def check_bidi(label: str, check_ltr: bool = False) -> bool:
return True
-def check_initial_combiner(label: str) -> bool:
+def check_initial_combiner(label):
+
if unicodedata.category(label[0])[0] == 'M':
raise IDNAError('Label begins with an illegal combining character')
return True
-def check_hyphen_ok(label: str) -> bool:
+def check_hyphen_ok(label):
+
if label[2:4] == '--':
raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
if label[0] == '-' or label[-1] == '-':
@@ -131,12 +140,14 @@ def check_hyphen_ok(label: str) -> bool:
return True
-def check_nfc(label: str) -> None:
+def check_nfc(label):
+
if unicodedata.normalize('NFC', label) != label:
raise IDNAError('Label must be in Normalization Form C')
-def valid_contextj(label: str, pos: int) -> bool:
+def valid_contextj(label, pos):
+
cp_value = ord(label[pos])
if cp_value == 0x200c:
@@ -179,7 +190,8 @@ def valid_contextj(label: str, pos: int) -> bool:
return False
-def valid_contexto(label: str, pos: int, exception: bool = False) -> bool:
+def valid_contexto(label, pos, exception=False):
+
cp_value = ord(label[pos])
if cp_value == 0x00b7:
@@ -200,7 +212,7 @@ def valid_contexto(label: str, pos: int, exception: bool = False) -> bool:
elif cp_value == 0x30fb:
for cp in label:
- if cp == '\u30fb':
+ if cp == u'\u30fb':
continue
if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
return True
@@ -218,10 +230,9 @@ def valid_contexto(label: str, pos: int, exception: bool = False) -> bool:
return False
return True
- return False
+def check_label(label):
-def check_label(label: Union[str, bytes, bytearray]) -> None:
if isinstance(label, (bytes, bytearray)):
label = label.decode('utf-8')
if len(label) == 0:
@@ -238,108 +249,98 @@ def check_label(label: Union[str, bytes, bytearray]) -> None:
elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
try:
if not valid_contextj(label, pos):
- raise InvalidCodepointContext('Joiner {} not allowed at position {} in {}'.format(
+ raise InvalidCodepointContext('Joiner {0} not allowed at position {1} in {2}'.format(
_unot(cp_value), pos+1, repr(label)))
except ValueError:
- raise IDNAError('Unknown codepoint adjacent to joiner {} at position {} in {}'.format(
+ raise IDNAError('Unknown codepoint adjacent to joiner {0} at position {1} in {2}'.format(
_unot(cp_value), pos+1, repr(label)))
elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
if not valid_contexto(label, pos):
- raise InvalidCodepointContext('Codepoint {} not allowed at position {} in {}'.format(_unot(cp_value), pos+1, repr(label)))
+ raise InvalidCodepointContext('Codepoint {0} not allowed at position {1} in {2}'.format(_unot(cp_value), pos+1, repr(label)))
else:
- raise InvalidCodepoint('Codepoint {} at position {} of {} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
+ raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
check_bidi(label)
-def alabel(label: str) -> bytes:
+def alabel(label):
+
try:
- label_bytes = label.encode('ascii')
- ulabel(label_bytes)
- if not valid_label_length(label_bytes):
+ label = label.encode('ascii')
+ ulabel(label)
+ if not valid_label_length(label):
raise IDNAError('Label too long')
- return label_bytes
+ return label
except UnicodeEncodeError:
pass
if not label:
raise IDNAError('No Input')
- label = str(label)
+ label = unicode(label)
check_label(label)
- label_bytes = _punycode(label)
- label_bytes = _alabel_prefix + label_bytes
+ label = _punycode(label)
+ label = _alabel_prefix + label
- if not valid_label_length(label_bytes):
+ if not valid_label_length(label):
raise IDNAError('Label too long')
- return label_bytes
+ return label
+
+def ulabel(label):
-def ulabel(label: Union[str, bytes, bytearray]) -> str:
if not isinstance(label, (bytes, bytearray)):
try:
- label_bytes = label.encode('ascii')
+ label = label.encode('ascii')
except UnicodeEncodeError:
check_label(label)
return label
+
+ label = label.lower()
+ if label.startswith(_alabel_prefix):
+ label = label[len(_alabel_prefix):]
else:
- label_bytes = label
-
- label_bytes = label_bytes.lower()
- if label_bytes.startswith(_alabel_prefix):
- label_bytes = label_bytes[len(_alabel_prefix):]
- if not label_bytes:
- raise IDNAError('Malformed A-label, no Punycode eligible content found')
- if label_bytes.decode('ascii')[-1] == '-':
- raise IDNAError('A-label must not end with a hyphen')
- else:
- check_label(label_bytes)
- return label_bytes.decode('ascii')
+ check_label(label)
+ return label.decode('ascii')
- try:
- label = label_bytes.decode('punycode')
- except UnicodeError:
- raise IDNAError('Invalid A-label')
+ label = label.decode('punycode')
check_label(label)
return label
-def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str:
+def uts46_remap(domain, std3_rules=True, transitional=False):
"""Re-map the characters in the string according to UTS46 processing."""
from .uts46data import uts46data
- output = ''
-
- for pos, char in enumerate(domain):
- code_point = ord(char)
- try:
+ output = u""
+ try:
+ for pos, char in enumerate(domain):
+ code_point = ord(char)
uts46row = uts46data[code_point if code_point < 256 else
- bisect.bisect_left(uts46data, (code_point, 'Z')) - 1]
+ bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
status = uts46row[1]
- replacement = None # type: Optional[str]
- if len(uts46row) == 3:
- replacement = uts46row[2] # type: ignore
- if (status == 'V' or
- (status == 'D' and not transitional) or
- (status == '3' and not std3_rules and replacement is None)):
+ replacement = uts46row[2] if len(uts46row) == 3 else None
+ if (status == "V" or
+ (status == "D" and not transitional) or
+ (status == "3" and not std3_rules and replacement is None)):
output += char
- elif replacement is not None and (status == 'M' or
- (status == '3' and not std3_rules) or
- (status == 'D' and transitional)):
+ elif replacement is not None and (status == "M" or
+ (status == "3" and not std3_rules) or
+ (status == "D" and transitional)):
output += replacement
- elif status != 'I':
+ elif status != "I":
raise IndexError()
- except IndexError:
- raise InvalidCodepoint(
- 'Codepoint {} not allowed at position {} in {}'.format(
- _unot(code_point), pos + 1, repr(domain)))
+ return unicodedata.normalize("NFC", output)
+ except IndexError:
+ raise InvalidCodepoint(
+ "Codepoint {0} not allowed at position {1} in {2}".format(
+ _unot(code_point), pos + 1, repr(domain)))
- return unicodedata.normalize('NFC', output)
+def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False):
-def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes:
if isinstance(s, (bytes, bytearray)):
- s = s.decode('ascii')
+ s = s.decode("ascii")
if uts46:
s = uts46_remap(s, std3_rules, transitional)
trailing_dot = False
@@ -367,12 +368,10 @@ def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool =
return s
-def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False) -> str:
- try:
- if isinstance(s, (bytes, bytearray)):
- s = s.decode('ascii')
- except UnicodeDecodeError:
- raise IDNAError('Invalid ASCII in A-label')
+def decode(s, strict=False, uts46=False, std3_rules=False):
+
+ if isinstance(s, (bytes, bytearray)):
+ s = s.decode("ascii")
if uts46:
s = uts46_remap(s, std3_rules, False)
trailing_dot = False
@@ -380,7 +379,7 @@ def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool =
if not strict:
labels = _unicode_dots_re.split(s)
else:
- labels = s.split('.')
+ labels = s.split(u'.')
if not labels or labels == ['']:
raise IDNAError('Empty domain')
if not labels[-1]:
@@ -393,5 +392,5 @@ def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool =
else:
raise IDNAError('Empty label')
if trailing_dot:
- result.append('')
- return '.'.join(result)
+ result.append(u'')
+ return u'.'.join(result)
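
With ``idna/__init__.py`` reverted to ``from .core import *``, the per-label
helpers stay importable from the package root (``core`` defines no ``__all__``,
so its public names are re-exported). A quick sketch, with expected values
taken from the README examples above:

.. code-block:: python

    from idna import alabel, ulabel, encode, decode

    print(alabel('测试'))                        # b'xn--0zwm56d'
    print(ulabel(b'xn--0zwm56d'))                # '测试'
    print(encode('ドメイン.テスト'))              # b'xn--eckwd4c7c.xn--zckzah'
    print(decode(b'xn--eckwd4c7c.xn--zckzah'))   # 'ドメイン.テスト'
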
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/idnadata.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/idnadata.py
old mode 100644
new mode 100755
index 1b5805d1..a80c959d
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/idnadata.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/idnadata.py
@@ -1,6 +1,6 @@
# This file is automatically generated by tools/idna-data
-__version__ = '14.0.0'
+__version__ = "11.0.0"
scripts = {
'Greek': (
0x37000000374,
@@ -48,19 +48,16 @@
0x300700003008,
0x30210000302a,
0x30380000303c,
- 0x340000004dc0,
- 0x4e000000a000,
+ 0x340000004db6,
+ 0x4e0000009ff0,
0xf9000000fa6e,
0xfa700000fada,
- 0x16fe200016fe4,
- 0x16ff000016ff2,
- 0x200000002a6e0,
- 0x2a7000002b739,
+ 0x200000002a6d7,
+ 0x2a7000002b735,
0x2b7400002b81e,
0x2b8200002cea2,
0x2ceb00002ebe1,
0x2f8000002fa1e,
- 0x300000003134b,
),
'Hebrew': (
0x591000005c8,
@@ -76,8 +73,7 @@
'Hiragana': (
0x304100003097,
0x309d000030a0,
- 0x1b0010001b120,
- 0x1b1500001b153,
+ 0x1b0010001b11f,
0x1f2000001f201,
),
'Katakana': (
@@ -88,12 +84,7 @@
0x330000003358,
0xff660000ff70,
0xff710000ff9e,
- 0x1aff00001aff4,
- 0x1aff50001affc,
- 0x1affd0001afff,
0x1b0000001b001,
- 0x1b1200001b123,
- 0x1b1640001b168,
),
}
joining_types = {
@@ -396,9 +387,9 @@
0x853: 68,
0x854: 82,
0x855: 68,
- 0x856: 82,
- 0x857: 82,
- 0x858: 82,
+ 0x856: 85,
+ 0x857: 85,
+ 0x858: 85,
0x860: 68,
0x861: 85,
0x862: 68,
@@ -410,39 +401,6 @@
0x868: 68,
0x869: 82,
0x86a: 82,
- 0x870: 82,
- 0x871: 82,
- 0x872: 82,
- 0x873: 82,
- 0x874: 82,
- 0x875: 82,
- 0x876: 82,
- 0x877: 82,
- 0x878: 82,
- 0x879: 82,
- 0x87a: 82,
- 0x87b: 82,
- 0x87c: 82,
- 0x87d: 82,
- 0x87e: 82,
- 0x87f: 82,
- 0x880: 82,
- 0x881: 82,
- 0x882: 82,
- 0x883: 67,
- 0x884: 67,
- 0x885: 67,
- 0x886: 68,
- 0x887: 85,
- 0x888: 85,
- 0x889: 68,
- 0x88a: 68,
- 0x88b: 68,
- 0x88c: 68,
- 0x88d: 68,
- 0x88e: 82,
- 0x890: 85,
- 0x891: 85,
0x8a0: 68,
0x8a1: 68,
0x8a2: 68,
@@ -464,7 +422,6 @@
0x8b2: 82,
0x8b3: 68,
0x8b4: 68,
- 0x8b5: 68,
0x8b6: 68,
0x8b7: 68,
0x8b8: 68,
@@ -473,17 +430,6 @@
0x8bb: 68,
0x8bc: 68,
0x8bd: 68,
- 0x8be: 68,
- 0x8bf: 68,
- 0x8c0: 68,
- 0x8c1: 68,
- 0x8c2: 68,
- 0x8c3: 68,
- 0x8c4: 68,
- 0x8c5: 68,
- 0x8c6: 68,
- 0x8c7: 68,
- 0x8c8: 68,
0x8e2: 85,
0x1806: 85,
0x1807: 68,
@@ -808,52 +754,6 @@
0x10f52: 68,
0x10f53: 68,
0x10f54: 82,
- 0x10f70: 68,
- 0x10f71: 68,
- 0x10f72: 68,
- 0x10f73: 68,
- 0x10f74: 82,
- 0x10f75: 82,
- 0x10f76: 68,
- 0x10f77: 68,
- 0x10f78: 68,
- 0x10f79: 68,
- 0x10f7a: 68,
- 0x10f7b: 68,
- 0x10f7c: 68,
- 0x10f7d: 68,
- 0x10f7e: 68,
- 0x10f7f: 68,
- 0x10f80: 68,
- 0x10f81: 68,
- 0x10fb0: 68,
- 0x10fb1: 85,
- 0x10fb2: 68,
- 0x10fb3: 68,
- 0x10fb4: 82,
- 0x10fb5: 82,
- 0x10fb6: 82,
- 0x10fb7: 85,
- 0x10fb8: 68,
- 0x10fb9: 82,
- 0x10fba: 82,
- 0x10fbb: 68,
- 0x10fbc: 68,
- 0x10fbd: 82,
- 0x10fbe: 68,
- 0x10fbf: 68,
- 0x10fc0: 85,
- 0x10fc1: 68,
- 0x10fc2: 82,
- 0x10fc3: 82,
- 0x10fc4: 68,
- 0x10fc5: 85,
- 0x10fc6: 85,
- 0x10fc7: 85,
- 0x10fc8: 85,
- 0x10fc9: 82,
- 0x10fca: 68,
- 0x10fcb: 76,
0x110bd: 85,
0x110cd: 85,
0x1e900: 68,
@@ -924,7 +824,6 @@
0x1e941: 68,
0x1e942: 68,
0x1e943: 68,
- 0x1e94b: 84,
}
codepoint_classes = {
'PVALID': (
@@ -1226,9 +1125,9 @@
0x8000000082e,
0x8400000085c,
0x8600000086b,
- 0x87000000888,
- 0x8890000088f,
- 0x898000008e2,
+ 0x8a0000008b5,
+ 0x8b6000008be,
+ 0x8d3000008e2,
0x8e300000958,
0x96000000964,
0x96600000970,
@@ -1286,7 +1185,7 @@
0xb3c00000b45,
0xb4700000b49,
0xb4b00000b4e,
- 0xb5500000b58,
+ 0xb5600000b58,
0xb5f00000b64,
0xb6600000b70,
0xb7100000b72,
@@ -1310,12 +1209,11 @@
0xc0e00000c11,
0xc1200000c29,
0xc2a00000c3a,
- 0xc3c00000c45,
+ 0xc3d00000c45,
0xc4600000c49,
0xc4a00000c4e,
0xc5500000c57,
0xc5800000c5b,
- 0xc5d00000c5e,
0xc6000000c64,
0xc6600000c70,
0xc8000000c84,
@@ -1328,11 +1226,12 @@
0xcc600000cc9,
0xcca00000cce,
0xcd500000cd7,
- 0xcdd00000cdf,
+ 0xcde00000cdf,
0xce000000ce4,
0xce600000cf0,
0xcf100000cf3,
- 0xd0000000d0d,
+ 0xd0000000d04,
+ 0xd0500000d0d,
0xd0e00000d11,
0xd1200000d45,
0xd4600000d49,
@@ -1341,7 +1240,7 @@
0xd5f00000d64,
0xd6600000d70,
0xd7a00000d80,
- 0xd8100000d84,
+ 0xd8200000d84,
0xd8500000d97,
0xd9a00000db2,
0xdb300000dbc,
@@ -1359,11 +1258,18 @@
0xe5000000e5a,
0xe8100000e83,
0xe8400000e85,
- 0xe8600000e8b,
- 0xe8c00000ea4,
+ 0xe8700000e89,
+ 0xe8a00000e8b,
+ 0xe8d00000e8e,
+ 0xe9400000e98,
+ 0xe9900000ea0,
+ 0xea100000ea4,
0xea500000ea6,
- 0xea700000eb3,
- 0xeb400000ebe,
+ 0xea700000ea8,
+ 0xeaa00000eac,
+ 0xead00000eb3,
+ 0xeb400000eba,
+ 0xebb00000ebe,
0xec000000ec5,
0xec600000ec7,
0xec800000ece,
@@ -1425,8 +1331,9 @@
0x16810000169b,
0x16a0000016eb,
0x16f1000016f9,
- 0x170000001716,
- 0x171f00001735,
+ 0x17000000170d,
+ 0x170e00001715,
+ 0x172000001735,
0x174000001754,
0x17600000176d,
0x176e00001771,
@@ -1455,8 +1362,7 @@
0x1a9000001a9a,
0x1aa700001aa8,
0x1ab000001abe,
- 0x1abf00001acf,
- 0x1b0000001b4d,
+ 0x1b0000001b4c,
0x1b5000001b5a,
0x1b6b00001b74,
0x1b8000001bf4,
@@ -1464,14 +1370,15 @@
0x1c4000001c4a,
0x1c4d00001c7e,
0x1cd000001cd3,
- 0x1cd400001cfb,
+ 0x1cd400001cfa,
0x1d0000001d2c,
0x1d2f00001d30,
0x1d3b00001d3c,
0x1d4e00001d4f,
0x1d6b00001d78,
0x1d7900001d9b,
- 0x1dc000001e00,
+ 0x1dc000001dfa,
+ 0x1dfb00001e00,
0x1e0100001e02,
0x1e0300001e04,
0x1e0500001e06,
@@ -1620,7 +1527,7 @@
0x1ff600001ff7,
0x214e0000214f,
0x218400002185,
- 0x2c3000002c60,
+ 0x2c3000002c5f,
0x2c6100002c62,
0x2c6500002c67,
0x2c6800002c69,
@@ -1706,10 +1613,11 @@
0x30a1000030fb,
0x30fc000030ff,
0x310500003130,
- 0x31a0000031c0,
+ 0x31a0000031bb,
0x31f000003200,
- 0x340000004dc0,
- 0x4e000000a48d,
+ 0x340000004db6,
+ 0x4e0000009ff0,
+ 0xa0000000a48d,
0xa4d00000a4fe,
0xa5000000a60d,
0xa6100000a62c,
@@ -1819,22 +1727,8 @@
0xa7b50000a7b6,
0xa7b70000a7b8,
0xa7b90000a7ba,
- 0xa7bb0000a7bc,
- 0xa7bd0000a7be,
- 0xa7bf0000a7c0,
- 0xa7c10000a7c2,
- 0xa7c30000a7c4,
- 0xa7c80000a7c9,
- 0xa7ca0000a7cb,
- 0xa7d10000a7d2,
- 0xa7d30000a7d4,
- 0xa7d50000a7d6,
- 0xa7d70000a7d8,
- 0xa7d90000a7da,
- 0xa7f20000a7f5,
- 0xa7f60000a7f8,
+ 0xa7f70000a7f8,
0xa7fa0000a828,
- 0xa82c0000a82d,
0xa8400000a874,
0xa8800000a8c6,
0xa8d00000a8da,
@@ -1859,7 +1753,7 @@
0xab200000ab27,
0xab280000ab2f,
0xab300000ab5b,
- 0xab600000ab6a,
+ 0xab600000ab66,
0xabc00000abeb,
0xabec0000abee,
0xabf00000abfa,
@@ -1897,16 +1791,9 @@
0x104d8000104fc,
0x1050000010528,
0x1053000010564,
- 0x10597000105a2,
- 0x105a3000105b2,
- 0x105b3000105ba,
- 0x105bb000105bd,
0x1060000010737,
0x1074000010756,
0x1076000010768,
- 0x1078000010786,
- 0x10787000107b1,
- 0x107b2000107bb,
0x1080000010806,
0x1080800010809,
0x1080a00010836,
@@ -1940,29 +1827,22 @@
0x10cc000010cf3,
0x10d0000010d28,
0x10d3000010d3a,
- 0x10e8000010eaa,
- 0x10eab00010ead,
- 0x10eb000010eb2,
0x10f0000010f1d,
0x10f2700010f28,
0x10f3000010f51,
- 0x10f7000010f86,
- 0x10fb000010fc5,
- 0x10fe000010ff7,
0x1100000011047,
- 0x1106600011076,
+ 0x1106600011070,
0x1107f000110bb,
- 0x110c2000110c3,
0x110d0000110e9,
0x110f0000110fa,
0x1110000011135,
0x1113600011140,
- 0x1114400011148,
+ 0x1114400011147,
0x1115000011174,
0x1117600011177,
0x11180000111c5,
0x111c9000111cd,
- 0x111ce000111db,
+ 0x111d0000111db,
0x111dc000111dd,
0x1120000011212,
0x1121300011238,
@@ -1991,7 +1871,7 @@
0x1137000011375,
0x114000001144b,
0x114500001145a,
- 0x1145e00011462,
+ 0x1145e0001145f,
0x11480000114c6,
0x114c7000114c8,
0x114d0000114da,
@@ -2001,31 +1881,20 @@
0x1160000011641,
0x1164400011645,
0x116500001165a,
- 0x11680000116b9,
+ 0x11680000116b8,
0x116c0000116ca,
0x117000001171b,
0x1171d0001172c,
0x117300001173a,
- 0x1174000011747,
0x118000001183b,
0x118c0000118ea,
- 0x118ff00011907,
- 0x119090001190a,
- 0x1190c00011914,
- 0x1191500011917,
- 0x1191800011936,
- 0x1193700011939,
- 0x1193b00011944,
- 0x119500001195a,
- 0x119a0000119a8,
- 0x119aa000119d8,
- 0x119da000119e2,
- 0x119e3000119e5,
+ 0x118ff00011900,
0x11a0000011a3f,
0x11a4700011a48,
- 0x11a5000011a9a,
+ 0x11a5000011a84,
+ 0x11a8600011a9a,
0x11a9d00011a9e,
- 0x11ab000011af9,
+ 0x11ac000011af9,
0x11c0000011c09,
0x11c0a00011c37,
0x11c3800011c41,
@@ -2047,17 +1916,13 @@
0x11d9300011d99,
0x11da000011daa,
0x11ee000011ef7,
- 0x11fb000011fb1,
0x120000001239a,
0x1248000012544,
- 0x12f9000012ff1,
0x130000001342f,
0x1440000014647,
0x1680000016a39,
0x16a4000016a5f,
0x16a6000016a6a,
- 0x16a7000016abf,
- 0x16ac000016aca,
0x16ad000016aee,
0x16af000016af5,
0x16b0000016b37,
@@ -2066,62 +1931,39 @@
0x16b6300016b78,
0x16b7d00016b90,
0x16e6000016e80,
- 0x16f0000016f4b,
- 0x16f4f00016f88,
+ 0x16f0000016f45,
+ 0x16f5000016f7f,
0x16f8f00016fa0,
0x16fe000016fe2,
- 0x16fe300016fe5,
- 0x16ff000016ff2,
- 0x17000000187f8,
- 0x1880000018cd6,
- 0x18d0000018d09,
- 0x1aff00001aff4,
- 0x1aff50001affc,
- 0x1affd0001afff,
- 0x1b0000001b123,
- 0x1b1500001b153,
- 0x1b1640001b168,
+ 0x17000000187f2,
+ 0x1880000018af3,
+ 0x1b0000001b11f,
0x1b1700001b2fc,
0x1bc000001bc6b,
0x1bc700001bc7d,
0x1bc800001bc89,
0x1bc900001bc9a,
0x1bc9d0001bc9f,
- 0x1cf000001cf2e,
- 0x1cf300001cf47,
0x1da000001da37,
0x1da3b0001da6d,
0x1da750001da76,
0x1da840001da85,
0x1da9b0001daa0,
0x1daa10001dab0,
- 0x1df000001df1f,
0x1e0000001e007,
0x1e0080001e019,
0x1e01b0001e022,
0x1e0230001e025,
0x1e0260001e02b,
- 0x1e1000001e12d,
- 0x1e1300001e13e,
- 0x1e1400001e14a,
- 0x1e14e0001e14f,
- 0x1e2900001e2af,
- 0x1e2c00001e2fa,
- 0x1e7e00001e7e7,
- 0x1e7e80001e7ec,
- 0x1e7ed0001e7ef,
- 0x1e7f00001e7ff,
0x1e8000001e8c5,
0x1e8d00001e8d7,
- 0x1e9220001e94c,
+ 0x1e9220001e94b,
0x1e9500001e95a,
- 0x1fbf00001fbfa,
- 0x200000002a6e0,
- 0x2a7000002b739,
+ 0x200000002a6d7,
+ 0x2a7000002b735,
0x2b7400002b81e,
0x2b8200002cea2,
0x2ceb00002ebe1,
- 0x300000003134b,
),
'CONTEXTJ': (
0x200c0000200e,
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/intranges.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/intranges.py
old mode 100644
new mode 100755
index 6a43b047..fa8a7356
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/intranges.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/intranges.py
@@ -6,9 +6,8 @@
"""
import bisect
-from typing import List, Tuple
-def intranges_from_list(list_: List[int]) -> Tuple[int, ...]:
+def intranges_from_list(list_):
"""Represent a list of integers as a sequence of ranges:
((start_0, end_0), (start_1, end_1), ...), such that the original
integers are exactly those x such that start_i <= x < end_i for some i.
@@ -29,14 +28,14 @@ def intranges_from_list(list_: List[int]) -> Tuple[int, ...]:
return tuple(ranges)
-def _encode_range(start: int, end: int) -> int:
+def _encode_range(start, end):
return (start << 32) | end
-def _decode_range(r: int) -> Tuple[int, int]:
+def _decode_range(r):
return (r >> 32), (r & ((1 << 32) - 1))
-def intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool:
+def intranges_contain(int_, ranges):
"""Determine if `int_` falls into one of the ranges in `ranges`."""
tuple_ = _encode_range(int_, 0)
pos = bisect.bisect_left(ranges, tuple_)
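The intranges.py hunks above only strip type annotations; the module's core trick is unchanged: each (start, end) codepoint range is packed into a single integer, start in the high 32 bits and end in the low 32 bits, which is also the format of the large hex constants earlier in this diff (0x31f000003200 encodes the range 0x31F0..0x3200). A flat sorted tuple of such packed ints can then be searched with bisect. A minimal self-contained sketch of that technique follows; the names are hypothetical and the dispatch is illustrative, not the library's exact code:

    # Sketch of the packed-range encoding used by idna/intranges.py.
    import bisect

    def encode_range(start, end):
        # start occupies the high 32 bits, end the low 32 bits
        return (start << 32) | end

    def decode_range(r):
        return (r >> 32), (r & ((1 << 32) - 1))

    # e.g. digits 0x30..0x39 and lowercase ASCII 0x61..0x7A as two packed ranges
    ranges = tuple(sorted([encode_range(0x30, 0x3A), encode_range(0x61, 0x7B)]))

    def contains(cp, ranges):
        # Probe with a zero-width range starting at cp: a hit is either the
        # row at pos (a range starting exactly at cp) or the row before it.
        pos = bisect.bisect_left(ranges, encode_range(cp, 0))
        if pos < len(ranges) and decode_range(ranges[pos])[0] == cp:
            return True
        if pos > 0:
            start, end = decode_range(ranges[pos - 1])
            if start <= cp < end:
                return True
        return False

    assert contains(0x35, ranges) and not contains(0x40, ranges)

Packing each pair into one int keeps the generated tables flat tuples of plain ints, which is compact in a generated module and keeps every comparison during the binary search a primitive int comparison.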
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/package_data.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/package_data.py
old mode 100644
new mode 100755
index f5ea87c1..257e8989
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/package_data.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/package_data.py
@@ -1,2 +1,2 @@
-__version__ = '3.3'
+__version__ = '2.8'
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/py.typed b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/py.typed
deleted file mode 100644
index e69de29b..00000000
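The uts46data.py diff that follows regresses the generated UTS 46 mapping table from Unicode 14.0.0 back to 11.0.0 (see the __version__ hunk below) and restores u'' literals for Python 2. Each _seg_* function returns rows of the form (codepoint, status[, mapping]) sorted by codepoint; the segments are concatenated into one tuple, and a character is resolved by finding the last row at or before its codepoint, then acting on the status: 'V' valid, 'M' mapped to the replacement string, 'D' deviation, 'I' ignored, 'X' disallowed, '3' disallowed under STD3 rules. A rough sketch of that lookup, using a few rows copied from the table (the dispatch details are illustrative, not the library's exact code):

    # Sketch of how the _seg_* rows below are consumed.
    import bisect

    uts46data = (
        (0x41, 'M', u'a'),   # 'A' maps to 'a'
        (0x5B, '3'),         # '[' disallowed under STD3 rules
        (0x61, 'V'),         # 'a'..'z' valid as-is
        (0x7B, 'X'),         # disallowed
    )

    def remap_char(cp):
        # Last row whose codepoint is <= cp ('Z' sorts after any status code).
        row = uts46data[bisect.bisect_right(uts46data, (cp, 'Z')) - 1]
        status = row[1]
        if status == 'V':
            return chr(cp)
        if status in ('M', 'D'):
            return row[2]      # replacement string
        if status == 'I':
            return ''          # ignored: deleted from the label
        raise ValueError('disallowed codepoint U+%04X' % cp)

    assert remap_char(0x41) == u'a' and remap_char(0x63) == u'c'

Because a row covers every codepoint up to the next row's start, long runs of identical status collapse to a single entry, which is why the table stays tractable even though it spans the whole Unicode range.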
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/uts46data.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/uts46data.py
old mode 100644
new mode 100755
index 8f65705e..a68ed4c0
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/uts46data.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/idna/uts46data.py
@@ -1,14 +1,11 @@
# This file is automatically generated by tools/idna-data
# vim: set fileencoding=utf-8 :
-from typing import List, Tuple, Union
-
-
"""IDNA Mapping Table from UTS46."""
-__version__ = '14.0.0'
-def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+__version__ = "11.0.0"
+def _seg_0():
return [
(0x0, '3'),
(0x1, '3'),
@@ -75,32 +72,32 @@ def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x3E, '3'),
(0x3F, '3'),
(0x40, '3'),
- (0x41, 'M', 'a'),
- (0x42, 'M', 'b'),
- (0x43, 'M', 'c'),
- (0x44, 'M', 'd'),
- (0x45, 'M', 'e'),
- (0x46, 'M', 'f'),
- (0x47, 'M', 'g'),
- (0x48, 'M', 'h'),
- (0x49, 'M', 'i'),
- (0x4A, 'M', 'j'),
- (0x4B, 'M', 'k'),
- (0x4C, 'M', 'l'),
- (0x4D, 'M', 'm'),
- (0x4E, 'M', 'n'),
- (0x4F, 'M', 'o'),
- (0x50, 'M', 'p'),
- (0x51, 'M', 'q'),
- (0x52, 'M', 'r'),
- (0x53, 'M', 's'),
- (0x54, 'M', 't'),
- (0x55, 'M', 'u'),
- (0x56, 'M', 'v'),
- (0x57, 'M', 'w'),
- (0x58, 'M', 'x'),
- (0x59, 'M', 'y'),
- (0x5A, 'M', 'z'),
+ (0x41, 'M', u'a'),
+ (0x42, 'M', u'b'),
+ (0x43, 'M', u'c'),
+ (0x44, 'M', u'd'),
+ (0x45, 'M', u'e'),
+ (0x46, 'M', u'f'),
+ (0x47, 'M', u'g'),
+ (0x48, 'M', u'h'),
+ (0x49, 'M', u'i'),
+ (0x4A, 'M', u'j'),
+ (0x4B, 'M', u'k'),
+ (0x4C, 'M', u'l'),
+ (0x4D, 'M', u'm'),
+ (0x4E, 'M', u'n'),
+ (0x4F, 'M', u'o'),
+ (0x50, 'M', u'p'),
+ (0x51, 'M', u'q'),
+ (0x52, 'M', u'r'),
+ (0x53, 'M', u's'),
+ (0x54, 'M', u't'),
+ (0x55, 'M', u'u'),
+ (0x56, 'M', u'v'),
+ (0x57, 'M', u'w'),
+ (0x58, 'M', u'x'),
+ (0x59, 'M', u'y'),
+ (0x5A, 'M', u'z'),
(0x5B, '3'),
(0x5C, '3'),
(0x5D, '3'),
@@ -112,7 +109,7 @@ def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x63, 'V'),
]
-def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_1():
return [
(0x64, 'V'),
(0x65, 'V'),
@@ -174,7 +171,7 @@ def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x9D, 'X'),
(0x9E, 'X'),
(0x9F, 'X'),
- (0xA0, '3', ' '),
+ (0xA0, '3', u' '),
(0xA1, 'V'),
(0xA2, 'V'),
(0xA3, 'V'),
@@ -182,66 +179,66 @@ def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xA5, 'V'),
(0xA6, 'V'),
(0xA7, 'V'),
- (0xA8, '3', ' ̈'),
+ (0xA8, '3', u' ̈'),
(0xA9, 'V'),
- (0xAA, 'M', 'a'),
+ (0xAA, 'M', u'a'),
(0xAB, 'V'),
(0xAC, 'V'),
(0xAD, 'I'),
(0xAE, 'V'),
- (0xAF, '3', ' ̄'),
+ (0xAF, '3', u' ̄'),
(0xB0, 'V'),
(0xB1, 'V'),
- (0xB2, 'M', '2'),
- (0xB3, 'M', '3'),
- (0xB4, '3', ' ́'),
- (0xB5, 'M', 'μ'),
+ (0xB2, 'M', u'2'),
+ (0xB3, 'M', u'3'),
+ (0xB4, '3', u' ́'),
+ (0xB5, 'M', u'μ'),
(0xB6, 'V'),
(0xB7, 'V'),
- (0xB8, '3', ' ̧'),
- (0xB9, 'M', '1'),
- (0xBA, 'M', 'o'),
+ (0xB8, '3', u' ̧'),
+ (0xB9, 'M', u'1'),
+ (0xBA, 'M', u'o'),
(0xBB, 'V'),
- (0xBC, 'M', '1⁄4'),
- (0xBD, 'M', '1⁄2'),
- (0xBE, 'M', '3⁄4'),
+ (0xBC, 'M', u'1⁄4'),
+ (0xBD, 'M', u'1⁄2'),
+ (0xBE, 'M', u'3⁄4'),
(0xBF, 'V'),
- (0xC0, 'M', 'à'),
- (0xC1, 'M', 'á'),
- (0xC2, 'M', 'â'),
- (0xC3, 'M', 'ã'),
- (0xC4, 'M', 'ä'),
- (0xC5, 'M', 'å'),
- (0xC6, 'M', 'æ'),
- (0xC7, 'M', 'ç'),
+ (0xC0, 'M', u'à'),
+ (0xC1, 'M', u'á'),
+ (0xC2, 'M', u'â'),
+ (0xC3, 'M', u'ã'),
+ (0xC4, 'M', u'ä'),
+ (0xC5, 'M', u'å'),
+ (0xC6, 'M', u'æ'),
+ (0xC7, 'M', u'ç'),
]
-def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_2():
return [
- (0xC8, 'M', 'è'),
- (0xC9, 'M', 'é'),
- (0xCA, 'M', 'ê'),
- (0xCB, 'M', 'ë'),
- (0xCC, 'M', 'ì'),
- (0xCD, 'M', 'í'),
- (0xCE, 'M', 'î'),
- (0xCF, 'M', 'ï'),
- (0xD0, 'M', 'ð'),
- (0xD1, 'M', 'ñ'),
- (0xD2, 'M', 'ò'),
- (0xD3, 'M', 'ó'),
- (0xD4, 'M', 'ô'),
- (0xD5, 'M', 'õ'),
- (0xD6, 'M', 'ö'),
+ (0xC8, 'M', u'è'),
+ (0xC9, 'M', u'é'),
+ (0xCA, 'M', u'ê'),
+ (0xCB, 'M', u'ë'),
+ (0xCC, 'M', u'ì'),
+ (0xCD, 'M', u'í'),
+ (0xCE, 'M', u'î'),
+ (0xCF, 'M', u'ï'),
+ (0xD0, 'M', u'ð'),
+ (0xD1, 'M', u'ñ'),
+ (0xD2, 'M', u'ò'),
+ (0xD3, 'M', u'ó'),
+ (0xD4, 'M', u'ô'),
+ (0xD5, 'M', u'õ'),
+ (0xD6, 'M', u'ö'),
(0xD7, 'V'),
- (0xD8, 'M', 'ø'),
- (0xD9, 'M', 'ù'),
- (0xDA, 'M', 'ú'),
- (0xDB, 'M', 'û'),
- (0xDC, 'M', 'ü'),
- (0xDD, 'M', 'ý'),
- (0xDE, 'M', 'þ'),
- (0xDF, 'D', 'ss'),
+ (0xD8, 'M', u'ø'),
+ (0xD9, 'M', u'ù'),
+ (0xDA, 'M', u'ú'),
+ (0xDB, 'M', u'û'),
+ (0xDC, 'M', u'ü'),
+ (0xDD, 'M', u'ý'),
+ (0xDE, 'M', u'þ'),
+ (0xDF, 'D', u'ss'),
(0xE0, 'V'),
(0xE1, 'V'),
(0xE2, 'V'),
@@ -274,765 +271,765 @@ def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xFD, 'V'),
(0xFE, 'V'),
(0xFF, 'V'),
- (0x100, 'M', 'ā'),
+ (0x100, 'M', u'ā'),
(0x101, 'V'),
- (0x102, 'M', 'ă'),
+ (0x102, 'M', u'ă'),
(0x103, 'V'),
- (0x104, 'M', 'ą'),
+ (0x104, 'M', u'ą'),
(0x105, 'V'),
- (0x106, 'M', 'ć'),
+ (0x106, 'M', u'ć'),
(0x107, 'V'),
- (0x108, 'M', 'ĉ'),
+ (0x108, 'M', u'ĉ'),
(0x109, 'V'),
- (0x10A, 'M', 'ċ'),
+ (0x10A, 'M', u'ċ'),
(0x10B, 'V'),
- (0x10C, 'M', 'č'),
+ (0x10C, 'M', u'č'),
(0x10D, 'V'),
- (0x10E, 'M', 'ď'),
+ (0x10E, 'M', u'ď'),
(0x10F, 'V'),
- (0x110, 'M', 'đ'),
+ (0x110, 'M', u'đ'),
(0x111, 'V'),
- (0x112, 'M', 'ē'),
+ (0x112, 'M', u'ē'),
(0x113, 'V'),
- (0x114, 'M', 'ĕ'),
+ (0x114, 'M', u'ĕ'),
(0x115, 'V'),
- (0x116, 'M', 'ė'),
+ (0x116, 'M', u'ė'),
(0x117, 'V'),
- (0x118, 'M', 'ę'),
+ (0x118, 'M', u'ę'),
(0x119, 'V'),
- (0x11A, 'M', 'ě'),
+ (0x11A, 'M', u'ě'),
(0x11B, 'V'),
- (0x11C, 'M', 'ĝ'),
+ (0x11C, 'M', u'ĝ'),
(0x11D, 'V'),
- (0x11E, 'M', 'ğ'),
+ (0x11E, 'M', u'ğ'),
(0x11F, 'V'),
- (0x120, 'M', 'ġ'),
+ (0x120, 'M', u'ġ'),
(0x121, 'V'),
- (0x122, 'M', 'ģ'),
+ (0x122, 'M', u'ģ'),
(0x123, 'V'),
- (0x124, 'M', 'ĥ'),
+ (0x124, 'M', u'ĥ'),
(0x125, 'V'),
- (0x126, 'M', 'ħ'),
+ (0x126, 'M', u'ħ'),
(0x127, 'V'),
- (0x128, 'M', 'ĩ'),
+ (0x128, 'M', u'ĩ'),
(0x129, 'V'),
- (0x12A, 'M', 'ī'),
+ (0x12A, 'M', u'ī'),
(0x12B, 'V'),
]
-def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_3():
return [
- (0x12C, 'M', 'ĭ'),
+ (0x12C, 'M', u'ĭ'),
(0x12D, 'V'),
- (0x12E, 'M', 'į'),
+ (0x12E, 'M', u'į'),
(0x12F, 'V'),
- (0x130, 'M', 'i̇'),
+ (0x130, 'M', u'i̇'),
(0x131, 'V'),
- (0x132, 'M', 'ij'),
- (0x134, 'M', 'ĵ'),
+ (0x132, 'M', u'ij'),
+ (0x134, 'M', u'ĵ'),
(0x135, 'V'),
- (0x136, 'M', 'ķ'),
+ (0x136, 'M', u'ķ'),
(0x137, 'V'),
- (0x139, 'M', 'ĺ'),
+ (0x139, 'M', u'ĺ'),
(0x13A, 'V'),
- (0x13B, 'M', 'ļ'),
+ (0x13B, 'M', u'ļ'),
(0x13C, 'V'),
- (0x13D, 'M', 'ľ'),
+ (0x13D, 'M', u'ľ'),
(0x13E, 'V'),
- (0x13F, 'M', 'l·'),
- (0x141, 'M', 'ł'),
+ (0x13F, 'M', u'l·'),
+ (0x141, 'M', u'ł'),
(0x142, 'V'),
- (0x143, 'M', 'ń'),
+ (0x143, 'M', u'ń'),
(0x144, 'V'),
- (0x145, 'M', 'ņ'),
+ (0x145, 'M', u'ņ'),
(0x146, 'V'),
- (0x147, 'M', 'ň'),
+ (0x147, 'M', u'ň'),
(0x148, 'V'),
- (0x149, 'M', 'ʼn'),
- (0x14A, 'M', 'ŋ'),
+ (0x149, 'M', u'ʼn'),
+ (0x14A, 'M', u'ŋ'),
(0x14B, 'V'),
- (0x14C, 'M', 'ō'),
+ (0x14C, 'M', u'ō'),
(0x14D, 'V'),
- (0x14E, 'M', 'ŏ'),
+ (0x14E, 'M', u'ŏ'),
(0x14F, 'V'),
- (0x150, 'M', 'ő'),
+ (0x150, 'M', u'ő'),
(0x151, 'V'),
- (0x152, 'M', 'œ'),
+ (0x152, 'M', u'œ'),
(0x153, 'V'),
- (0x154, 'M', 'ŕ'),
+ (0x154, 'M', u'ŕ'),
(0x155, 'V'),
- (0x156, 'M', 'ŗ'),
+ (0x156, 'M', u'ŗ'),
(0x157, 'V'),
- (0x158, 'M', 'ř'),
+ (0x158, 'M', u'ř'),
(0x159, 'V'),
- (0x15A, 'M', 'ś'),
+ (0x15A, 'M', u'ś'),
(0x15B, 'V'),
- (0x15C, 'M', 'ŝ'),
+ (0x15C, 'M', u'ŝ'),
(0x15D, 'V'),
- (0x15E, 'M', 'ş'),
+ (0x15E, 'M', u'ş'),
(0x15F, 'V'),
- (0x160, 'M', 'š'),
+ (0x160, 'M', u'š'),
(0x161, 'V'),
- (0x162, 'M', 'ţ'),
+ (0x162, 'M', u'ţ'),
(0x163, 'V'),
- (0x164, 'M', 'ť'),
+ (0x164, 'M', u'ť'),
(0x165, 'V'),
- (0x166, 'M', 'ŧ'),
+ (0x166, 'M', u'ŧ'),
(0x167, 'V'),
- (0x168, 'M', 'ũ'),
+ (0x168, 'M', u'ũ'),
(0x169, 'V'),
- (0x16A, 'M', 'ū'),
+ (0x16A, 'M', u'ū'),
(0x16B, 'V'),
- (0x16C, 'M', 'ŭ'),
+ (0x16C, 'M', u'ŭ'),
(0x16D, 'V'),
- (0x16E, 'M', 'ů'),
+ (0x16E, 'M', u'ů'),
(0x16F, 'V'),
- (0x170, 'M', 'ű'),
+ (0x170, 'M', u'ű'),
(0x171, 'V'),
- (0x172, 'M', 'ų'),
+ (0x172, 'M', u'ų'),
(0x173, 'V'),
- (0x174, 'M', 'ŵ'),
+ (0x174, 'M', u'ŵ'),
(0x175, 'V'),
- (0x176, 'M', 'ŷ'),
+ (0x176, 'M', u'ŷ'),
(0x177, 'V'),
- (0x178, 'M', 'ÿ'),
- (0x179, 'M', 'ź'),
+ (0x178, 'M', u'ÿ'),
+ (0x179, 'M', u'ź'),
(0x17A, 'V'),
- (0x17B, 'M', 'ż'),
+ (0x17B, 'M', u'ż'),
(0x17C, 'V'),
- (0x17D, 'M', 'ž'),
+ (0x17D, 'M', u'ž'),
(0x17E, 'V'),
- (0x17F, 'M', 's'),
+ (0x17F, 'M', u's'),
(0x180, 'V'),
- (0x181, 'M', 'ɓ'),
- (0x182, 'M', 'ƃ'),
+ (0x181, 'M', u'ɓ'),
+ (0x182, 'M', u'ƃ'),
(0x183, 'V'),
- (0x184, 'M', 'ƅ'),
+ (0x184, 'M', u'ƅ'),
(0x185, 'V'),
- (0x186, 'M', 'ɔ'),
- (0x187, 'M', 'ƈ'),
+ (0x186, 'M', u'ɔ'),
+ (0x187, 'M', u'ƈ'),
(0x188, 'V'),
- (0x189, 'M', 'ɖ'),
- (0x18A, 'M', 'ɗ'),
- (0x18B, 'M', 'ƌ'),
+ (0x189, 'M', u'ɖ'),
+ (0x18A, 'M', u'ɗ'),
+ (0x18B, 'M', u'ƌ'),
(0x18C, 'V'),
- (0x18E, 'M', 'ǝ'),
- (0x18F, 'M', 'ə'),
- (0x190, 'M', 'ɛ'),
- (0x191, 'M', 'ƒ'),
+ (0x18E, 'M', u'ǝ'),
+ (0x18F, 'M', u'ə'),
+ (0x190, 'M', u'ɛ'),
+ (0x191, 'M', u'ƒ'),
(0x192, 'V'),
- (0x193, 'M', 'ɠ'),
+ (0x193, 'M', u'ɠ'),
]
-def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_4():
return [
- (0x194, 'M', 'ɣ'),
+ (0x194, 'M', u'ɣ'),
(0x195, 'V'),
- (0x196, 'M', 'ɩ'),
- (0x197, 'M', 'ɨ'),
- (0x198, 'M', 'ƙ'),
+ (0x196, 'M', u'ɩ'),
+ (0x197, 'M', u'ɨ'),
+ (0x198, 'M', u'ƙ'),
(0x199, 'V'),
- (0x19C, 'M', 'ɯ'),
- (0x19D, 'M', 'ɲ'),
+ (0x19C, 'M', u'ɯ'),
+ (0x19D, 'M', u'ɲ'),
(0x19E, 'V'),
- (0x19F, 'M', 'ɵ'),
- (0x1A0, 'M', 'ơ'),
+ (0x19F, 'M', u'ɵ'),
+ (0x1A0, 'M', u'ơ'),
(0x1A1, 'V'),
- (0x1A2, 'M', 'ƣ'),
+ (0x1A2, 'M', u'ƣ'),
(0x1A3, 'V'),
- (0x1A4, 'M', 'ƥ'),
+ (0x1A4, 'M', u'ƥ'),
(0x1A5, 'V'),
- (0x1A6, 'M', 'ʀ'),
- (0x1A7, 'M', 'ƨ'),
+ (0x1A6, 'M', u'ʀ'),
+ (0x1A7, 'M', u'ƨ'),
(0x1A8, 'V'),
- (0x1A9, 'M', 'ʃ'),
+ (0x1A9, 'M', u'ʃ'),
(0x1AA, 'V'),
- (0x1AC, 'M', 'ƭ'),
+ (0x1AC, 'M', u'ƭ'),
(0x1AD, 'V'),
- (0x1AE, 'M', 'ʈ'),
- (0x1AF, 'M', 'ư'),
+ (0x1AE, 'M', u'ʈ'),
+ (0x1AF, 'M', u'ư'),
(0x1B0, 'V'),
- (0x1B1, 'M', 'ʊ'),
- (0x1B2, 'M', 'ʋ'),
- (0x1B3, 'M', 'ƴ'),
+ (0x1B1, 'M', u'ʊ'),
+ (0x1B2, 'M', u'ʋ'),
+ (0x1B3, 'M', u'ƴ'),
(0x1B4, 'V'),
- (0x1B5, 'M', 'ƶ'),
+ (0x1B5, 'M', u'ƶ'),
(0x1B6, 'V'),
- (0x1B7, 'M', 'ʒ'),
- (0x1B8, 'M', 'ƹ'),
+ (0x1B7, 'M', u'ʒ'),
+ (0x1B8, 'M', u'ƹ'),
(0x1B9, 'V'),
- (0x1BC, 'M', 'ƽ'),
+ (0x1BC, 'M', u'ƽ'),
(0x1BD, 'V'),
- (0x1C4, 'M', 'dž'),
- (0x1C7, 'M', 'lj'),
- (0x1CA, 'M', 'nj'),
- (0x1CD, 'M', 'ǎ'),
+ (0x1C4, 'M', u'dž'),
+ (0x1C7, 'M', u'lj'),
+ (0x1CA, 'M', u'nj'),
+ (0x1CD, 'M', u'ǎ'),
(0x1CE, 'V'),
- (0x1CF, 'M', 'ǐ'),
+ (0x1CF, 'M', u'ǐ'),
(0x1D0, 'V'),
- (0x1D1, 'M', 'ǒ'),
+ (0x1D1, 'M', u'ǒ'),
(0x1D2, 'V'),
- (0x1D3, 'M', 'ǔ'),
+ (0x1D3, 'M', u'ǔ'),
(0x1D4, 'V'),
- (0x1D5, 'M', 'ǖ'),
+ (0x1D5, 'M', u'ǖ'),
(0x1D6, 'V'),
- (0x1D7, 'M', 'ǘ'),
+ (0x1D7, 'M', u'ǘ'),
(0x1D8, 'V'),
- (0x1D9, 'M', 'ǚ'),
+ (0x1D9, 'M', u'ǚ'),
(0x1DA, 'V'),
- (0x1DB, 'M', 'ǜ'),
+ (0x1DB, 'M', u'ǜ'),
(0x1DC, 'V'),
- (0x1DE, 'M', 'ǟ'),
+ (0x1DE, 'M', u'ǟ'),
(0x1DF, 'V'),
- (0x1E0, 'M', 'ǡ'),
+ (0x1E0, 'M', u'ǡ'),
(0x1E1, 'V'),
- (0x1E2, 'M', 'ǣ'),
+ (0x1E2, 'M', u'ǣ'),
(0x1E3, 'V'),
- (0x1E4, 'M', 'ǥ'),
+ (0x1E4, 'M', u'ǥ'),
(0x1E5, 'V'),
- (0x1E6, 'M', 'ǧ'),
+ (0x1E6, 'M', u'ǧ'),
(0x1E7, 'V'),
- (0x1E8, 'M', 'ǩ'),
+ (0x1E8, 'M', u'ǩ'),
(0x1E9, 'V'),
- (0x1EA, 'M', 'ǫ'),
+ (0x1EA, 'M', u'ǫ'),
(0x1EB, 'V'),
- (0x1EC, 'M', 'ǭ'),
+ (0x1EC, 'M', u'ǭ'),
(0x1ED, 'V'),
- (0x1EE, 'M', 'ǯ'),
+ (0x1EE, 'M', u'ǯ'),
(0x1EF, 'V'),
- (0x1F1, 'M', 'dz'),
- (0x1F4, 'M', 'ǵ'),
+ (0x1F1, 'M', u'dz'),
+ (0x1F4, 'M', u'ǵ'),
(0x1F5, 'V'),
- (0x1F6, 'M', 'ƕ'),
- (0x1F7, 'M', 'ƿ'),
- (0x1F8, 'M', 'ǹ'),
+ (0x1F6, 'M', u'ƕ'),
+ (0x1F7, 'M', u'ƿ'),
+ (0x1F8, 'M', u'ǹ'),
(0x1F9, 'V'),
- (0x1FA, 'M', 'ǻ'),
+ (0x1FA, 'M', u'ǻ'),
(0x1FB, 'V'),
- (0x1FC, 'M', 'ǽ'),
+ (0x1FC, 'M', u'ǽ'),
(0x1FD, 'V'),
- (0x1FE, 'M', 'ǿ'),
+ (0x1FE, 'M', u'ǿ'),
(0x1FF, 'V'),
- (0x200, 'M', 'ȁ'),
+ (0x200, 'M', u'ȁ'),
(0x201, 'V'),
- (0x202, 'M', 'ȃ'),
+ (0x202, 'M', u'ȃ'),
(0x203, 'V'),
- (0x204, 'M', 'ȅ'),
+ (0x204, 'M', u'ȅ'),
(0x205, 'V'),
- (0x206, 'M', 'ȇ'),
+ (0x206, 'M', u'ȇ'),
(0x207, 'V'),
- (0x208, 'M', 'ȉ'),
+ (0x208, 'M', u'ȉ'),
(0x209, 'V'),
- (0x20A, 'M', 'ȋ'),
+ (0x20A, 'M', u'ȋ'),
(0x20B, 'V'),
- (0x20C, 'M', 'ȍ'),
+ (0x20C, 'M', u'ȍ'),
]
-def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_5():
return [
(0x20D, 'V'),
- (0x20E, 'M', 'ȏ'),
+ (0x20E, 'M', u'ȏ'),
(0x20F, 'V'),
- (0x210, 'M', 'ȑ'),
+ (0x210, 'M', u'ȑ'),
(0x211, 'V'),
- (0x212, 'M', 'ȓ'),
+ (0x212, 'M', u'ȓ'),
(0x213, 'V'),
- (0x214, 'M', 'ȕ'),
+ (0x214, 'M', u'ȕ'),
(0x215, 'V'),
- (0x216, 'M', 'ȗ'),
+ (0x216, 'M', u'ȗ'),
(0x217, 'V'),
- (0x218, 'M', 'ș'),
+ (0x218, 'M', u'ș'),
(0x219, 'V'),
- (0x21A, 'M', 'ț'),
+ (0x21A, 'M', u'ț'),
(0x21B, 'V'),
- (0x21C, 'M', 'ȝ'),
+ (0x21C, 'M', u'ȝ'),
(0x21D, 'V'),
- (0x21E, 'M', 'ȟ'),
+ (0x21E, 'M', u'ȟ'),
(0x21F, 'V'),
- (0x220, 'M', 'ƞ'),
+ (0x220, 'M', u'ƞ'),
(0x221, 'V'),
- (0x222, 'M', 'ȣ'),
+ (0x222, 'M', u'ȣ'),
(0x223, 'V'),
- (0x224, 'M', 'ȥ'),
+ (0x224, 'M', u'ȥ'),
(0x225, 'V'),
- (0x226, 'M', 'ȧ'),
+ (0x226, 'M', u'ȧ'),
(0x227, 'V'),
- (0x228, 'M', 'ȩ'),
+ (0x228, 'M', u'ȩ'),
(0x229, 'V'),
- (0x22A, 'M', 'ȫ'),
+ (0x22A, 'M', u'ȫ'),
(0x22B, 'V'),
- (0x22C, 'M', 'ȭ'),
+ (0x22C, 'M', u'ȭ'),
(0x22D, 'V'),
- (0x22E, 'M', 'ȯ'),
+ (0x22E, 'M', u'ȯ'),
(0x22F, 'V'),
- (0x230, 'M', 'ȱ'),
+ (0x230, 'M', u'ȱ'),
(0x231, 'V'),
- (0x232, 'M', 'ȳ'),
+ (0x232, 'M', u'ȳ'),
(0x233, 'V'),
- (0x23A, 'M', 'ⱥ'),
- (0x23B, 'M', 'ȼ'),
+ (0x23A, 'M', u'ⱥ'),
+ (0x23B, 'M', u'ȼ'),
(0x23C, 'V'),
- (0x23D, 'M', 'ƚ'),
- (0x23E, 'M', 'ⱦ'),
+ (0x23D, 'M', u'ƚ'),
+ (0x23E, 'M', u'ⱦ'),
(0x23F, 'V'),
- (0x241, 'M', 'ɂ'),
+ (0x241, 'M', u'ɂ'),
(0x242, 'V'),
- (0x243, 'M', 'ƀ'),
- (0x244, 'M', 'ʉ'),
- (0x245, 'M', 'ʌ'),
- (0x246, 'M', 'ɇ'),
+ (0x243, 'M', u'ƀ'),
+ (0x244, 'M', u'ʉ'),
+ (0x245, 'M', u'ʌ'),
+ (0x246, 'M', u'ɇ'),
(0x247, 'V'),
- (0x248, 'M', 'ɉ'),
+ (0x248, 'M', u'ɉ'),
(0x249, 'V'),
- (0x24A, 'M', 'ɋ'),
+ (0x24A, 'M', u'ɋ'),
(0x24B, 'V'),
- (0x24C, 'M', 'ɍ'),
+ (0x24C, 'M', u'ɍ'),
(0x24D, 'V'),
- (0x24E, 'M', 'ɏ'),
+ (0x24E, 'M', u'ɏ'),
(0x24F, 'V'),
- (0x2B0, 'M', 'h'),
- (0x2B1, 'M', 'ɦ'),
- (0x2B2, 'M', 'j'),
- (0x2B3, 'M', 'r'),
- (0x2B4, 'M', 'ɹ'),
- (0x2B5, 'M', 'ɻ'),
- (0x2B6, 'M', 'ʁ'),
- (0x2B7, 'M', 'w'),
- (0x2B8, 'M', 'y'),
+ (0x2B0, 'M', u'h'),
+ (0x2B1, 'M', u'ɦ'),
+ (0x2B2, 'M', u'j'),
+ (0x2B3, 'M', u'r'),
+ (0x2B4, 'M', u'ɹ'),
+ (0x2B5, 'M', u'ɻ'),
+ (0x2B6, 'M', u'ʁ'),
+ (0x2B7, 'M', u'w'),
+ (0x2B8, 'M', u'y'),
(0x2B9, 'V'),
- (0x2D8, '3', ' ̆'),
- (0x2D9, '3', ' ̇'),
- (0x2DA, '3', ' ̊'),
- (0x2DB, '3', ' ̨'),
- (0x2DC, '3', ' ̃'),
- (0x2DD, '3', ' ̋'),
+ (0x2D8, '3', u' ̆'),
+ (0x2D9, '3', u' ̇'),
+ (0x2DA, '3', u' ̊'),
+ (0x2DB, '3', u' ̨'),
+ (0x2DC, '3', u' ̃'),
+ (0x2DD, '3', u' ̋'),
(0x2DE, 'V'),
- (0x2E0, 'M', 'ɣ'),
- (0x2E1, 'M', 'l'),
- (0x2E2, 'M', 's'),
- (0x2E3, 'M', 'x'),
- (0x2E4, 'M', 'ʕ'),
+ (0x2E0, 'M', u'ɣ'),
+ (0x2E1, 'M', u'l'),
+ (0x2E2, 'M', u's'),
+ (0x2E3, 'M', u'x'),
+ (0x2E4, 'M', u'ʕ'),
(0x2E5, 'V'),
- (0x340, 'M', '̀'),
- (0x341, 'M', '́'),
+ (0x340, 'M', u'̀'),
+ (0x341, 'M', u'́'),
(0x342, 'V'),
- (0x343, 'M', '̓'),
- (0x344, 'M', '̈́'),
- (0x345, 'M', 'ι'),
+ (0x343, 'M', u'̓'),
+ (0x344, 'M', u'̈́'),
+ (0x345, 'M', u'ι'),
(0x346, 'V'),
(0x34F, 'I'),
(0x350, 'V'),
- (0x370, 'M', 'ͱ'),
+ (0x370, 'M', u'ͱ'),
(0x371, 'V'),
- (0x372, 'M', 'ͳ'),
+ (0x372, 'M', u'ͳ'),
(0x373, 'V'),
- (0x374, 'M', 'ʹ'),
+ (0x374, 'M', u'ʹ'),
(0x375, 'V'),
- (0x376, 'M', 'ͷ'),
+ (0x376, 'M', u'ͷ'),
(0x377, 'V'),
]
-def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_6():
return [
(0x378, 'X'),
- (0x37A, '3', ' ι'),
+ (0x37A, '3', u' ι'),
(0x37B, 'V'),
- (0x37E, '3', ';'),
- (0x37F, 'M', 'ϳ'),
+ (0x37E, '3', u';'),
+ (0x37F, 'M', u'ϳ'),
(0x380, 'X'),
- (0x384, '3', ' ́'),
- (0x385, '3', ' ̈́'),
- (0x386, 'M', 'ά'),
- (0x387, 'M', '·'),
- (0x388, 'M', 'έ'),
- (0x389, 'M', 'ή'),
- (0x38A, 'M', 'ί'),
+ (0x384, '3', u' ́'),
+ (0x385, '3', u' ̈́'),
+ (0x386, 'M', u'ά'),
+ (0x387, 'M', u'·'),
+ (0x388, 'M', u'έ'),
+ (0x389, 'M', u'ή'),
+ (0x38A, 'M', u'ί'),
(0x38B, 'X'),
- (0x38C, 'M', 'ό'),
+ (0x38C, 'M', u'ό'),
(0x38D, 'X'),
- (0x38E, 'M', 'ύ'),
- (0x38F, 'M', 'ώ'),
+ (0x38E, 'M', u'ύ'),
+ (0x38F, 'M', u'ώ'),
(0x390, 'V'),
- (0x391, 'M', 'α'),
- (0x392, 'M', 'β'),
- (0x393, 'M', 'γ'),
- (0x394, 'M', 'δ'),
- (0x395, 'M', 'ε'),
- (0x396, 'M', 'ζ'),
- (0x397, 'M', 'η'),
- (0x398, 'M', 'θ'),
- (0x399, 'M', 'ι'),
- (0x39A, 'M', 'κ'),
- (0x39B, 'M', 'λ'),
- (0x39C, 'M', 'μ'),
- (0x39D, 'M', 'ν'),
- (0x39E, 'M', 'ξ'),
- (0x39F, 'M', 'ο'),
- (0x3A0, 'M', 'π'),
- (0x3A1, 'M', 'ρ'),
+ (0x391, 'M', u'α'),
+ (0x392, 'M', u'β'),
+ (0x393, 'M', u'γ'),
+ (0x394, 'M', u'δ'),
+ (0x395, 'M', u'ε'),
+ (0x396, 'M', u'ζ'),
+ (0x397, 'M', u'η'),
+ (0x398, 'M', u'θ'),
+ (0x399, 'M', u'ι'),
+ (0x39A, 'M', u'κ'),
+ (0x39B, 'M', u'λ'),
+ (0x39C, 'M', u'μ'),
+ (0x39D, 'M', u'ν'),
+ (0x39E, 'M', u'ξ'),
+ (0x39F, 'M', u'ο'),
+ (0x3A0, 'M', u'π'),
+ (0x3A1, 'M', u'ρ'),
(0x3A2, 'X'),
- (0x3A3, 'M', 'σ'),
- (0x3A4, 'M', 'τ'),
- (0x3A5, 'M', 'υ'),
- (0x3A6, 'M', 'φ'),
- (0x3A7, 'M', 'χ'),
- (0x3A8, 'M', 'ψ'),
- (0x3A9, 'M', 'ω'),
- (0x3AA, 'M', 'ϊ'),
- (0x3AB, 'M', 'ϋ'),
+ (0x3A3, 'M', u'σ'),
+ (0x3A4, 'M', u'τ'),
+ (0x3A5, 'M', u'υ'),
+ (0x3A6, 'M', u'φ'),
+ (0x3A7, 'M', u'χ'),
+ (0x3A8, 'M', u'ψ'),
+ (0x3A9, 'M', u'ω'),
+ (0x3AA, 'M', u'ϊ'),
+ (0x3AB, 'M', u'ϋ'),
(0x3AC, 'V'),
- (0x3C2, 'D', 'σ'),
+ (0x3C2, 'D', u'σ'),
(0x3C3, 'V'),
- (0x3CF, 'M', 'ϗ'),
- (0x3D0, 'M', 'β'),
- (0x3D1, 'M', 'θ'),
- (0x3D2, 'M', 'υ'),
- (0x3D3, 'M', 'ύ'),
- (0x3D4, 'M', 'ϋ'),
- (0x3D5, 'M', 'φ'),
- (0x3D6, 'M', 'π'),
+ (0x3CF, 'M', u'ϗ'),
+ (0x3D0, 'M', u'β'),
+ (0x3D1, 'M', u'θ'),
+ (0x3D2, 'M', u'υ'),
+ (0x3D3, 'M', u'ύ'),
+ (0x3D4, 'M', u'ϋ'),
+ (0x3D5, 'M', u'φ'),
+ (0x3D6, 'M', u'π'),
(0x3D7, 'V'),
- (0x3D8, 'M', 'ϙ'),
+ (0x3D8, 'M', u'ϙ'),
(0x3D9, 'V'),
- (0x3DA, 'M', 'ϛ'),
+ (0x3DA, 'M', u'ϛ'),
(0x3DB, 'V'),
- (0x3DC, 'M', 'ϝ'),
+ (0x3DC, 'M', u'ϝ'),
(0x3DD, 'V'),
- (0x3DE, 'M', 'ϟ'),
+ (0x3DE, 'M', u'ϟ'),
(0x3DF, 'V'),
- (0x3E0, 'M', 'ϡ'),
+ (0x3E0, 'M', u'ϡ'),
(0x3E1, 'V'),
- (0x3E2, 'M', 'ϣ'),
+ (0x3E2, 'M', u'ϣ'),
(0x3E3, 'V'),
- (0x3E4, 'M', 'ϥ'),
+ (0x3E4, 'M', u'ϥ'),
(0x3E5, 'V'),
- (0x3E6, 'M', 'ϧ'),
+ (0x3E6, 'M', u'ϧ'),
(0x3E7, 'V'),
- (0x3E8, 'M', 'ϩ'),
+ (0x3E8, 'M', u'ϩ'),
(0x3E9, 'V'),
- (0x3EA, 'M', 'ϫ'),
+ (0x3EA, 'M', u'ϫ'),
(0x3EB, 'V'),
- (0x3EC, 'M', 'ϭ'),
+ (0x3EC, 'M', u'ϭ'),
(0x3ED, 'V'),
- (0x3EE, 'M', 'ϯ'),
+ (0x3EE, 'M', u'ϯ'),
(0x3EF, 'V'),
- (0x3F0, 'M', 'κ'),
- (0x3F1, 'M', 'ρ'),
- (0x3F2, 'M', 'σ'),
+ (0x3F0, 'M', u'κ'),
+ (0x3F1, 'M', u'ρ'),
+ (0x3F2, 'M', u'σ'),
(0x3F3, 'V'),
- (0x3F4, 'M', 'θ'),
- (0x3F5, 'M', 'ε'),
+ (0x3F4, 'M', u'θ'),
+ (0x3F5, 'M', u'ε'),
(0x3F6, 'V'),
- (0x3F7, 'M', 'ϸ'),
+ (0x3F7, 'M', u'ϸ'),
(0x3F8, 'V'),
- (0x3F9, 'M', 'σ'),
- (0x3FA, 'M', 'ϻ'),
+ (0x3F9, 'M', u'σ'),
+ (0x3FA, 'M', u'ϻ'),
(0x3FB, 'V'),
- (0x3FD, 'M', 'ͻ'),
- (0x3FE, 'M', 'ͼ'),
- (0x3FF, 'M', 'ͽ'),
- (0x400, 'M', 'ѐ'),
- (0x401, 'M', 'ё'),
- (0x402, 'M', 'ђ'),
+ (0x3FD, 'M', u'ͻ'),
+ (0x3FE, 'M', u'ͼ'),
+ (0x3FF, 'M', u'ͽ'),
+ (0x400, 'M', u'ѐ'),
+ (0x401, 'M', u'ё'),
+ (0x402, 'M', u'ђ'),
]
-def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_7():
return [
- (0x403, 'M', 'ѓ'),
- (0x404, 'M', 'є'),
- (0x405, 'M', 'ѕ'),
- (0x406, 'M', 'і'),
- (0x407, 'M', 'ї'),
- (0x408, 'M', 'ј'),
- (0x409, 'M', 'љ'),
- (0x40A, 'M', 'њ'),
- (0x40B, 'M', 'ћ'),
- (0x40C, 'M', 'ќ'),
- (0x40D, 'M', 'ѝ'),
- (0x40E, 'M', 'ў'),
- (0x40F, 'M', 'џ'),
- (0x410, 'M', 'а'),
- (0x411, 'M', 'б'),
- (0x412, 'M', 'в'),
- (0x413, 'M', 'г'),
- (0x414, 'M', 'д'),
- (0x415, 'M', 'е'),
- (0x416, 'M', 'ж'),
- (0x417, 'M', 'з'),
- (0x418, 'M', 'и'),
- (0x419, 'M', 'й'),
- (0x41A, 'M', 'к'),
- (0x41B, 'M', 'л'),
- (0x41C, 'M', 'м'),
- (0x41D, 'M', 'н'),
- (0x41E, 'M', 'о'),
- (0x41F, 'M', 'п'),
- (0x420, 'M', 'р'),
- (0x421, 'M', 'с'),
- (0x422, 'M', 'т'),
- (0x423, 'M', 'у'),
- (0x424, 'M', 'ф'),
- (0x425, 'M', 'х'),
- (0x426, 'M', 'ц'),
- (0x427, 'M', 'ч'),
- (0x428, 'M', 'ш'),
- (0x429, 'M', 'щ'),
- (0x42A, 'M', 'ъ'),
- (0x42B, 'M', 'ы'),
- (0x42C, 'M', 'ь'),
- (0x42D, 'M', 'э'),
- (0x42E, 'M', 'ю'),
- (0x42F, 'M', 'я'),
+ (0x403, 'M', u'ѓ'),
+ (0x404, 'M', u'є'),
+ (0x405, 'M', u'ѕ'),
+ (0x406, 'M', u'і'),
+ (0x407, 'M', u'ї'),
+ (0x408, 'M', u'ј'),
+ (0x409, 'M', u'љ'),
+ (0x40A, 'M', u'њ'),
+ (0x40B, 'M', u'ћ'),
+ (0x40C, 'M', u'ќ'),
+ (0x40D, 'M', u'ѝ'),
+ (0x40E, 'M', u'ў'),
+ (0x40F, 'M', u'џ'),
+ (0x410, 'M', u'а'),
+ (0x411, 'M', u'б'),
+ (0x412, 'M', u'в'),
+ (0x413, 'M', u'г'),
+ (0x414, 'M', u'д'),
+ (0x415, 'M', u'е'),
+ (0x416, 'M', u'ж'),
+ (0x417, 'M', u'з'),
+ (0x418, 'M', u'и'),
+ (0x419, 'M', u'й'),
+ (0x41A, 'M', u'к'),
+ (0x41B, 'M', u'л'),
+ (0x41C, 'M', u'м'),
+ (0x41D, 'M', u'н'),
+ (0x41E, 'M', u'о'),
+ (0x41F, 'M', u'п'),
+ (0x420, 'M', u'р'),
+ (0x421, 'M', u'с'),
+ (0x422, 'M', u'т'),
+ (0x423, 'M', u'у'),
+ (0x424, 'M', u'ф'),
+ (0x425, 'M', u'х'),
+ (0x426, 'M', u'ц'),
+ (0x427, 'M', u'ч'),
+ (0x428, 'M', u'ш'),
+ (0x429, 'M', u'щ'),
+ (0x42A, 'M', u'ъ'),
+ (0x42B, 'M', u'ы'),
+ (0x42C, 'M', u'ь'),
+ (0x42D, 'M', u'э'),
+ (0x42E, 'M', u'ю'),
+ (0x42F, 'M', u'я'),
(0x430, 'V'),
- (0x460, 'M', 'ѡ'),
+ (0x460, 'M', u'ѡ'),
(0x461, 'V'),
- (0x462, 'M', 'ѣ'),
+ (0x462, 'M', u'ѣ'),
(0x463, 'V'),
- (0x464, 'M', 'ѥ'),
+ (0x464, 'M', u'ѥ'),
(0x465, 'V'),
- (0x466, 'M', 'ѧ'),
+ (0x466, 'M', u'ѧ'),
(0x467, 'V'),
- (0x468, 'M', 'ѩ'),
+ (0x468, 'M', u'ѩ'),
(0x469, 'V'),
- (0x46A, 'M', 'ѫ'),
+ (0x46A, 'M', u'ѫ'),
(0x46B, 'V'),
- (0x46C, 'M', 'ѭ'),
+ (0x46C, 'M', u'ѭ'),
(0x46D, 'V'),
- (0x46E, 'M', 'ѯ'),
+ (0x46E, 'M', u'ѯ'),
(0x46F, 'V'),
- (0x470, 'M', 'ѱ'),
+ (0x470, 'M', u'ѱ'),
(0x471, 'V'),
- (0x472, 'M', 'ѳ'),
+ (0x472, 'M', u'ѳ'),
(0x473, 'V'),
- (0x474, 'M', 'ѵ'),
+ (0x474, 'M', u'ѵ'),
(0x475, 'V'),
- (0x476, 'M', 'ѷ'),
+ (0x476, 'M', u'ѷ'),
(0x477, 'V'),
- (0x478, 'M', 'ѹ'),
+ (0x478, 'M', u'ѹ'),
(0x479, 'V'),
- (0x47A, 'M', 'ѻ'),
+ (0x47A, 'M', u'ѻ'),
(0x47B, 'V'),
- (0x47C, 'M', 'ѽ'),
+ (0x47C, 'M', u'ѽ'),
(0x47D, 'V'),
- (0x47E, 'M', 'ѿ'),
+ (0x47E, 'M', u'ѿ'),
(0x47F, 'V'),
- (0x480, 'M', 'ҁ'),
+ (0x480, 'M', u'ҁ'),
(0x481, 'V'),
- (0x48A, 'M', 'ҋ'),
+ (0x48A, 'M', u'ҋ'),
(0x48B, 'V'),
- (0x48C, 'M', 'ҍ'),
+ (0x48C, 'M', u'ҍ'),
(0x48D, 'V'),
- (0x48E, 'M', 'ҏ'),
+ (0x48E, 'M', u'ҏ'),
(0x48F, 'V'),
- (0x490, 'M', 'ґ'),
+ (0x490, 'M', u'ґ'),
(0x491, 'V'),
- (0x492, 'M', 'ғ'),
+ (0x492, 'M', u'ғ'),
(0x493, 'V'),
- (0x494, 'M', 'ҕ'),
+ (0x494, 'M', u'ҕ'),
(0x495, 'V'),
- (0x496, 'M', 'җ'),
+ (0x496, 'M', u'җ'),
(0x497, 'V'),
- (0x498, 'M', 'ҙ'),
+ (0x498, 'M', u'ҙ'),
(0x499, 'V'),
- (0x49A, 'M', 'қ'),
+ (0x49A, 'M', u'қ'),
(0x49B, 'V'),
- (0x49C, 'M', 'ҝ'),
+ (0x49C, 'M', u'ҝ'),
(0x49D, 'V'),
]
-def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_8():
return [
- (0x49E, 'M', 'ҟ'),
+ (0x49E, 'M', u'ҟ'),
(0x49F, 'V'),
- (0x4A0, 'M', 'ҡ'),
+ (0x4A0, 'M', u'ҡ'),
(0x4A1, 'V'),
- (0x4A2, 'M', 'ң'),
+ (0x4A2, 'M', u'ң'),
(0x4A3, 'V'),
- (0x4A4, 'M', 'ҥ'),
+ (0x4A4, 'M', u'ҥ'),
(0x4A5, 'V'),
- (0x4A6, 'M', 'ҧ'),
+ (0x4A6, 'M', u'ҧ'),
(0x4A7, 'V'),
- (0x4A8, 'M', 'ҩ'),
+ (0x4A8, 'M', u'ҩ'),
(0x4A9, 'V'),
- (0x4AA, 'M', 'ҫ'),
+ (0x4AA, 'M', u'ҫ'),
(0x4AB, 'V'),
- (0x4AC, 'M', 'ҭ'),
+ (0x4AC, 'M', u'ҭ'),
(0x4AD, 'V'),
- (0x4AE, 'M', 'ү'),
+ (0x4AE, 'M', u'ү'),
(0x4AF, 'V'),
- (0x4B0, 'M', 'ұ'),
+ (0x4B0, 'M', u'ұ'),
(0x4B1, 'V'),
- (0x4B2, 'M', 'ҳ'),
+ (0x4B2, 'M', u'ҳ'),
(0x4B3, 'V'),
- (0x4B4, 'M', 'ҵ'),
+ (0x4B4, 'M', u'ҵ'),
(0x4B5, 'V'),
- (0x4B6, 'M', 'ҷ'),
+ (0x4B6, 'M', u'ҷ'),
(0x4B7, 'V'),
- (0x4B8, 'M', 'ҹ'),
+ (0x4B8, 'M', u'ҹ'),
(0x4B9, 'V'),
- (0x4BA, 'M', 'һ'),
+ (0x4BA, 'M', u'һ'),
(0x4BB, 'V'),
- (0x4BC, 'M', 'ҽ'),
+ (0x4BC, 'M', u'ҽ'),
(0x4BD, 'V'),
- (0x4BE, 'M', 'ҿ'),
+ (0x4BE, 'M', u'ҿ'),
(0x4BF, 'V'),
(0x4C0, 'X'),
- (0x4C1, 'M', 'ӂ'),
+ (0x4C1, 'M', u'ӂ'),
(0x4C2, 'V'),
- (0x4C3, 'M', 'ӄ'),
+ (0x4C3, 'M', u'ӄ'),
(0x4C4, 'V'),
- (0x4C5, 'M', 'ӆ'),
+ (0x4C5, 'M', u'ӆ'),
(0x4C6, 'V'),
- (0x4C7, 'M', 'ӈ'),
+ (0x4C7, 'M', u'ӈ'),
(0x4C8, 'V'),
- (0x4C9, 'M', 'ӊ'),
+ (0x4C9, 'M', u'ӊ'),
(0x4CA, 'V'),
- (0x4CB, 'M', 'ӌ'),
+ (0x4CB, 'M', u'ӌ'),
(0x4CC, 'V'),
- (0x4CD, 'M', 'ӎ'),
+ (0x4CD, 'M', u'ӎ'),
(0x4CE, 'V'),
- (0x4D0, 'M', 'ӑ'),
+ (0x4D0, 'M', u'ӑ'),
(0x4D1, 'V'),
- (0x4D2, 'M', 'ӓ'),
+ (0x4D2, 'M', u'ӓ'),
(0x4D3, 'V'),
- (0x4D4, 'M', 'ӕ'),
+ (0x4D4, 'M', u'ӕ'),
(0x4D5, 'V'),
- (0x4D6, 'M', 'ӗ'),
+ (0x4D6, 'M', u'ӗ'),
(0x4D7, 'V'),
- (0x4D8, 'M', 'ә'),
+ (0x4D8, 'M', u'ә'),
(0x4D9, 'V'),
- (0x4DA, 'M', 'ӛ'),
+ (0x4DA, 'M', u'ӛ'),
(0x4DB, 'V'),
- (0x4DC, 'M', 'ӝ'),
+ (0x4DC, 'M', u'ӝ'),
(0x4DD, 'V'),
- (0x4DE, 'M', 'ӟ'),
+ (0x4DE, 'M', u'ӟ'),
(0x4DF, 'V'),
- (0x4E0, 'M', 'ӡ'),
+ (0x4E0, 'M', u'ӡ'),
(0x4E1, 'V'),
- (0x4E2, 'M', 'ӣ'),
+ (0x4E2, 'M', u'ӣ'),
(0x4E3, 'V'),
- (0x4E4, 'M', 'ӥ'),
+ (0x4E4, 'M', u'ӥ'),
(0x4E5, 'V'),
- (0x4E6, 'M', 'ӧ'),
+ (0x4E6, 'M', u'ӧ'),
(0x4E7, 'V'),
- (0x4E8, 'M', 'ө'),
+ (0x4E8, 'M', u'ө'),
(0x4E9, 'V'),
- (0x4EA, 'M', 'ӫ'),
+ (0x4EA, 'M', u'ӫ'),
(0x4EB, 'V'),
- (0x4EC, 'M', 'ӭ'),
+ (0x4EC, 'M', u'ӭ'),
(0x4ED, 'V'),
- (0x4EE, 'M', 'ӯ'),
+ (0x4EE, 'M', u'ӯ'),
(0x4EF, 'V'),
- (0x4F0, 'M', 'ӱ'),
+ (0x4F0, 'M', u'ӱ'),
(0x4F1, 'V'),
- (0x4F2, 'M', 'ӳ'),
+ (0x4F2, 'M', u'ӳ'),
(0x4F3, 'V'),
- (0x4F4, 'M', 'ӵ'),
+ (0x4F4, 'M', u'ӵ'),
(0x4F5, 'V'),
- (0x4F6, 'M', 'ӷ'),
+ (0x4F6, 'M', u'ӷ'),
(0x4F7, 'V'),
- (0x4F8, 'M', 'ӹ'),
+ (0x4F8, 'M', u'ӹ'),
(0x4F9, 'V'),
- (0x4FA, 'M', 'ӻ'),
+ (0x4FA, 'M', u'ӻ'),
(0x4FB, 'V'),
- (0x4FC, 'M', 'ӽ'),
+ (0x4FC, 'M', u'ӽ'),
(0x4FD, 'V'),
- (0x4FE, 'M', 'ӿ'),
+ (0x4FE, 'M', u'ӿ'),
(0x4FF, 'V'),
- (0x500, 'M', 'ԁ'),
+ (0x500, 'M', u'ԁ'),
(0x501, 'V'),
- (0x502, 'M', 'ԃ'),
+ (0x502, 'M', u'ԃ'),
]
-def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_9():
return [
(0x503, 'V'),
- (0x504, 'M', 'ԅ'),
+ (0x504, 'M', u'ԅ'),
(0x505, 'V'),
- (0x506, 'M', 'ԇ'),
+ (0x506, 'M', u'ԇ'),
(0x507, 'V'),
- (0x508, 'M', 'ԉ'),
+ (0x508, 'M', u'ԉ'),
(0x509, 'V'),
- (0x50A, 'M', 'ԋ'),
+ (0x50A, 'M', u'ԋ'),
(0x50B, 'V'),
- (0x50C, 'M', 'ԍ'),
+ (0x50C, 'M', u'ԍ'),
(0x50D, 'V'),
- (0x50E, 'M', 'ԏ'),
+ (0x50E, 'M', u'ԏ'),
(0x50F, 'V'),
- (0x510, 'M', 'ԑ'),
+ (0x510, 'M', u'ԑ'),
(0x511, 'V'),
- (0x512, 'M', 'ԓ'),
+ (0x512, 'M', u'ԓ'),
(0x513, 'V'),
- (0x514, 'M', 'ԕ'),
+ (0x514, 'M', u'ԕ'),
(0x515, 'V'),
- (0x516, 'M', 'ԗ'),
+ (0x516, 'M', u'ԗ'),
(0x517, 'V'),
- (0x518, 'M', 'ԙ'),
+ (0x518, 'M', u'ԙ'),
(0x519, 'V'),
- (0x51A, 'M', 'ԛ'),
+ (0x51A, 'M', u'ԛ'),
(0x51B, 'V'),
- (0x51C, 'M', 'ԝ'),
+ (0x51C, 'M', u'ԝ'),
(0x51D, 'V'),
- (0x51E, 'M', 'ԟ'),
+ (0x51E, 'M', u'ԟ'),
(0x51F, 'V'),
- (0x520, 'M', 'ԡ'),
+ (0x520, 'M', u'ԡ'),
(0x521, 'V'),
- (0x522, 'M', 'ԣ'),
+ (0x522, 'M', u'ԣ'),
(0x523, 'V'),
- (0x524, 'M', 'ԥ'),
+ (0x524, 'M', u'ԥ'),
(0x525, 'V'),
- (0x526, 'M', 'ԧ'),
+ (0x526, 'M', u'ԧ'),
(0x527, 'V'),
- (0x528, 'M', 'ԩ'),
+ (0x528, 'M', u'ԩ'),
(0x529, 'V'),
- (0x52A, 'M', 'ԫ'),
+ (0x52A, 'M', u'ԫ'),
(0x52B, 'V'),
- (0x52C, 'M', 'ԭ'),
+ (0x52C, 'M', u'ԭ'),
(0x52D, 'V'),
- (0x52E, 'M', 'ԯ'),
+ (0x52E, 'M', u'ԯ'),
(0x52F, 'V'),
(0x530, 'X'),
- (0x531, 'M', 'ա'),
- (0x532, 'M', 'բ'),
- (0x533, 'M', 'գ'),
- (0x534, 'M', 'դ'),
- (0x535, 'M', 'ե'),
- (0x536, 'M', 'զ'),
- (0x537, 'M', 'է'),
- (0x538, 'M', 'ը'),
- (0x539, 'M', 'թ'),
- (0x53A, 'M', 'ժ'),
- (0x53B, 'M', 'ի'),
- (0x53C, 'M', 'լ'),
- (0x53D, 'M', 'խ'),
- (0x53E, 'M', 'ծ'),
- (0x53F, 'M', 'կ'),
- (0x540, 'M', 'հ'),
- (0x541, 'M', 'ձ'),
- (0x542, 'M', 'ղ'),
- (0x543, 'M', 'ճ'),
- (0x544, 'M', 'մ'),
- (0x545, 'M', 'յ'),
- (0x546, 'M', 'ն'),
- (0x547, 'M', 'շ'),
- (0x548, 'M', 'ո'),
- (0x549, 'M', 'չ'),
- (0x54A, 'M', 'պ'),
- (0x54B, 'M', 'ջ'),
- (0x54C, 'M', 'ռ'),
- (0x54D, 'M', 'ս'),
- (0x54E, 'M', 'վ'),
- (0x54F, 'M', 'տ'),
- (0x550, 'M', 'ր'),
- (0x551, 'M', 'ց'),
- (0x552, 'M', 'ւ'),
- (0x553, 'M', 'փ'),
- (0x554, 'M', 'ք'),
- (0x555, 'M', 'օ'),
- (0x556, 'M', 'ֆ'),
+ (0x531, 'M', u'ա'),
+ (0x532, 'M', u'բ'),
+ (0x533, 'M', u'գ'),
+ (0x534, 'M', u'դ'),
+ (0x535, 'M', u'ե'),
+ (0x536, 'M', u'զ'),
+ (0x537, 'M', u'է'),
+ (0x538, 'M', u'ը'),
+ (0x539, 'M', u'թ'),
+ (0x53A, 'M', u'ժ'),
+ (0x53B, 'M', u'ի'),
+ (0x53C, 'M', u'լ'),
+ (0x53D, 'M', u'խ'),
+ (0x53E, 'M', u'ծ'),
+ (0x53F, 'M', u'կ'),
+ (0x540, 'M', u'հ'),
+ (0x541, 'M', u'ձ'),
+ (0x542, 'M', u'ղ'),
+ (0x543, 'M', u'ճ'),
+ (0x544, 'M', u'մ'),
+ (0x545, 'M', u'յ'),
+ (0x546, 'M', u'ն'),
+ (0x547, 'M', u'շ'),
+ (0x548, 'M', u'ո'),
+ (0x549, 'M', u'չ'),
+ (0x54A, 'M', u'պ'),
+ (0x54B, 'M', u'ջ'),
+ (0x54C, 'M', u'ռ'),
+ (0x54D, 'M', u'ս'),
+ (0x54E, 'M', u'վ'),
+ (0x54F, 'M', u'տ'),
+ (0x550, 'M', u'ր'),
+ (0x551, 'M', u'ց'),
+ (0x552, 'M', u'ւ'),
+ (0x553, 'M', u'փ'),
+ (0x554, 'M', u'ք'),
+ (0x555, 'M', u'օ'),
+ (0x556, 'M', u'ֆ'),
(0x557, 'X'),
(0x559, 'V'),
- (0x587, 'M', 'եւ'),
+ (0x587, 'M', u'եւ'),
(0x588, 'V'),
(0x58B, 'X'),
(0x58D, 'V'),
@@ -1045,15 +1042,15 @@ def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x5F5, 'X'),
(0x606, 'V'),
(0x61C, 'X'),
- (0x61D, 'V'),
+ (0x61E, 'V'),
]
-def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_10():
return [
- (0x675, 'M', 'اٴ'),
- (0x676, 'M', 'وٴ'),
- (0x677, 'M', 'ۇٴ'),
- (0x678, 'M', 'يٴ'),
+ (0x675, 'M', u'اٴ'),
+ (0x676, 'M', u'وٴ'),
+ (0x677, 'M', u'ۇٴ'),
+ (0x678, 'M', u'يٴ'),
(0x679, 'V'),
(0x6DD, 'X'),
(0x6DE, 'V'),
@@ -1074,19 +1071,21 @@ def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x85F, 'X'),
(0x860, 'V'),
(0x86B, 'X'),
- (0x870, 'V'),
- (0x88F, 'X'),
- (0x898, 'V'),
+ (0x8A0, 'V'),
+ (0x8B5, 'X'),
+ (0x8B6, 'V'),
+ (0x8BE, 'X'),
+ (0x8D3, 'V'),
(0x8E2, 'X'),
(0x8E3, 'V'),
- (0x958, 'M', 'क़'),
- (0x959, 'M', 'ख़'),
- (0x95A, 'M', 'ग़'),
- (0x95B, 'M', 'ज़'),
- (0x95C, 'M', 'ड़'),
- (0x95D, 'M', 'ढ़'),
- (0x95E, 'M', 'फ़'),
- (0x95F, 'M', 'य़'),
+ (0x958, 'M', u'क़'),
+ (0x959, 'M', u'ख़'),
+ (0x95A, 'M', u'ग़'),
+ (0x95B, 'M', u'ज़'),
+ (0x95C, 'M', u'ड़'),
+ (0x95D, 'M', u'ढ़'),
+ (0x95E, 'M', u'फ़'),
+ (0x95F, 'M', u'य़'),
(0x960, 'V'),
(0x984, 'X'),
(0x985, 'V'),
@@ -1109,10 +1108,10 @@ def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x9CF, 'X'),
(0x9D7, 'V'),
(0x9D8, 'X'),
- (0x9DC, 'M', 'ড়'),
- (0x9DD, 'M', 'ঢ়'),
+ (0x9DC, 'M', u'ড়'),
+ (0x9DD, 'M', u'ঢ়'),
(0x9DE, 'X'),
- (0x9DF, 'M', 'য়'),
+ (0x9DF, 'M', u'য়'),
(0x9E0, 'V'),
(0x9E4, 'X'),
(0x9E6, 'V'),
@@ -1128,10 +1127,10 @@ def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xA2A, 'V'),
(0xA31, 'X'),
(0xA32, 'V'),
- (0xA33, 'M', 'ਲ਼'),
+ (0xA33, 'M', u'ਲ਼'),
(0xA34, 'X'),
(0xA35, 'V'),
- (0xA36, 'M', 'ਸ਼'),
+ (0xA36, 'M', u'ਸ਼'),
(0xA37, 'X'),
(0xA38, 'V'),
(0xA3A, 'X'),
@@ -1145,16 +1144,16 @@ def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xA4E, 'X'),
(0xA51, 'V'),
(0xA52, 'X'),
- (0xA59, 'M', 'ਖ਼'),
- (0xA5A, 'M', 'ਗ਼'),
- (0xA5B, 'M', 'ਜ਼'),
- (0xA5C, 'V'),
- (0xA5D, 'X'),
+ (0xA59, 'M', u'ਖ਼'),
+ (0xA5A, 'M', u'ਗ਼'),
+ (0xA5B, 'M', u'ਜ਼'),
]
-def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_11():
return [
- (0xA5E, 'M', 'ਫ਼'),
+ (0xA5C, 'V'),
+ (0xA5D, 'X'),
+ (0xA5E, 'M', u'ਫ਼'),
(0xA5F, 'X'),
(0xA66, 'V'),
(0xA77, 'X'),
@@ -1206,10 +1205,10 @@ def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xB49, 'X'),
(0xB4B, 'V'),
(0xB4E, 'X'),
- (0xB55, 'V'),
+ (0xB56, 'V'),
(0xB58, 'X'),
- (0xB5C, 'M', 'ଡ଼'),
- (0xB5D, 'M', 'ଢ଼'),
+ (0xB5C, 'M', u'ଡ଼'),
+ (0xB5D, 'M', u'ଢ଼'),
(0xB5E, 'X'),
(0xB5F, 'V'),
(0xB64, 'X'),
@@ -1252,14 +1251,14 @@ def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xC0E, 'V'),
(0xC11, 'X'),
(0xC12, 'V'),
- (0xC29, 'X'),
- (0xC2A, 'V'),
]
-def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_12():
return [
+ (0xC29, 'X'),
+ (0xC2A, 'V'),
(0xC3A, 'X'),
- (0xC3C, 'V'),
+ (0xC3D, 'V'),
(0xC45, 'X'),
(0xC46, 'V'),
(0xC49, 'X'),
@@ -1269,13 +1268,11 @@ def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xC57, 'X'),
(0xC58, 'V'),
(0xC5B, 'X'),
- (0xC5D, 'V'),
- (0xC5E, 'X'),
(0xC60, 'V'),
(0xC64, 'X'),
(0xC66, 'V'),
(0xC70, 'X'),
- (0xC77, 'V'),
+ (0xC78, 'V'),
(0xC8D, 'X'),
(0xC8E, 'V'),
(0xC91, 'X'),
@@ -1293,7 +1290,7 @@ def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xCCE, 'X'),
(0xCD5, 'V'),
(0xCD7, 'X'),
- (0xCDD, 'V'),
+ (0xCDE, 'V'),
(0xCDF, 'X'),
(0xCE0, 'V'),
(0xCE4, 'X'),
@@ -1302,6 +1299,8 @@ def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xCF1, 'V'),
(0xCF3, 'X'),
(0xD00, 'V'),
+ (0xD04, 'X'),
+ (0xD05, 'V'),
(0xD0D, 'X'),
(0xD0E, 'V'),
(0xD11, 'X'),
@@ -1315,7 +1314,7 @@ def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xD64, 'X'),
(0xD66, 'V'),
(0xD80, 'X'),
- (0xD81, 'V'),
+ (0xD82, 'V'),
(0xD84, 'X'),
(0xD85, 'V'),
(0xD97, 'X'),
@@ -1340,7 +1339,7 @@ def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xDF2, 'V'),
(0xDF5, 'X'),
(0xE01, 'V'),
- (0xE33, 'M', 'ํา'),
+ (0xE33, 'M', u'ํา'),
(0xE34, 'V'),
(0xE3B, 'X'),
(0xE3F, 'V'),
@@ -1349,19 +1348,33 @@ def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xE83, 'X'),
(0xE84, 'V'),
(0xE85, 'X'),
- (0xE86, 'V'),
+ (0xE87, 'V'),
+ (0xE89, 'X'),
+ (0xE8A, 'V'),
(0xE8B, 'X'),
- (0xE8C, 'V'),
+ (0xE8D, 'V'),
+ (0xE8E, 'X'),
+ (0xE94, 'V'),
+ ]
+
+def _seg_13():
+ return [
+ (0xE98, 'X'),
+ (0xE99, 'V'),
+ (0xEA0, 'X'),
+ (0xEA1, 'V'),
(0xEA4, 'X'),
(0xEA5, 'V'),
(0xEA6, 'X'),
(0xEA7, 'V'),
- (0xEB3, 'M', 'ໍາ'),
+ (0xEA8, 'X'),
+ (0xEAA, 'V'),
+ (0xEAC, 'X'),
+ (0xEAD, 'V'),
+ (0xEB3, 'M', u'ໍາ'),
(0xEB4, 'V'),
- ]
-
-def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
+ (0xEBA, 'X'),
+ (0xEBB, 'V'),
(0xEBE, 'X'),
(0xEC0, 'V'),
(0xEC5, 'X'),
@@ -1371,52 +1384,52 @@ def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xECE, 'X'),
(0xED0, 'V'),
(0xEDA, 'X'),
- (0xEDC, 'M', 'ຫນ'),
- (0xEDD, 'M', 'ຫມ'),
+ (0xEDC, 'M', u'ຫນ'),
+ (0xEDD, 'M', u'ຫມ'),
(0xEDE, 'V'),
(0xEE0, 'X'),
(0xF00, 'V'),
- (0xF0C, 'M', '་'),
+ (0xF0C, 'M', u'་'),
(0xF0D, 'V'),
- (0xF43, 'M', 'གྷ'),
+ (0xF43, 'M', u'གྷ'),
(0xF44, 'V'),
(0xF48, 'X'),
(0xF49, 'V'),
- (0xF4D, 'M', 'ཌྷ'),
+ (0xF4D, 'M', u'ཌྷ'),
(0xF4E, 'V'),
- (0xF52, 'M', 'དྷ'),
+ (0xF52, 'M', u'དྷ'),
(0xF53, 'V'),
- (0xF57, 'M', 'བྷ'),
+ (0xF57, 'M', u'བྷ'),
(0xF58, 'V'),
- (0xF5C, 'M', 'ཛྷ'),
+ (0xF5C, 'M', u'ཛྷ'),
(0xF5D, 'V'),
- (0xF69, 'M', 'ཀྵ'),
+ (0xF69, 'M', u'ཀྵ'),
(0xF6A, 'V'),
(0xF6D, 'X'),
(0xF71, 'V'),
- (0xF73, 'M', 'ཱི'),
+ (0xF73, 'M', u'ཱི'),
(0xF74, 'V'),
- (0xF75, 'M', 'ཱུ'),
- (0xF76, 'M', 'ྲྀ'),
- (0xF77, 'M', 'ྲཱྀ'),
- (0xF78, 'M', 'ླྀ'),
- (0xF79, 'M', 'ླཱྀ'),
+ (0xF75, 'M', u'ཱུ'),
+ (0xF76, 'M', u'ྲྀ'),
+ (0xF77, 'M', u'ྲཱྀ'),
+ (0xF78, 'M', u'ླྀ'),
+ (0xF79, 'M', u'ླཱྀ'),
(0xF7A, 'V'),
- (0xF81, 'M', 'ཱྀ'),
+ (0xF81, 'M', u'ཱྀ'),
(0xF82, 'V'),
- (0xF93, 'M', 'ྒྷ'),
+ (0xF93, 'M', u'ྒྷ'),
(0xF94, 'V'),
(0xF98, 'X'),
(0xF99, 'V'),
- (0xF9D, 'M', 'ྜྷ'),
+ (0xF9D, 'M', u'ྜྷ'),
(0xF9E, 'V'),
- (0xFA2, 'M', 'ྡྷ'),
+ (0xFA2, 'M', u'ྡྷ'),
(0xFA3, 'V'),
- (0xFA7, 'M', 'ྦྷ'),
+ (0xFA7, 'M', u'ྦྷ'),
(0xFA8, 'V'),
- (0xFAC, 'M', 'ྫྷ'),
+ (0xFAC, 'M', u'ྫྷ'),
(0xFAD, 'V'),
- (0xFB9, 'M', 'ྐྵ'),
+ (0xFB9, 'M', u'ྐྵ'),
(0xFBA, 'V'),
(0xFBD, 'X'),
(0xFBE, 'V'),
@@ -1425,12 +1438,12 @@ def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xFDB, 'X'),
(0x1000, 'V'),
(0x10A0, 'X'),
- (0x10C7, 'M', 'ⴧ'),
+ (0x10C7, 'M', u'ⴧ'),
(0x10C8, 'X'),
- (0x10CD, 'M', 'ⴭ'),
+ (0x10CD, 'M', u'ⴭ'),
(0x10CE, 'X'),
(0x10D0, 'V'),
- (0x10FC, 'M', 'ნ'),
+ (0x10FC, 'M', u'ნ'),
(0x10FD, 'V'),
(0x115F, 'X'),
(0x1161, 'V'),
@@ -1446,6 +1459,10 @@ def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x1260, 'V'),
(0x1289, 'X'),
(0x128A, 'V'),
+ ]
+
+def _seg_14():
+ return [
(0x128E, 'X'),
(0x1290, 'V'),
(0x12B1, 'X'),
@@ -1462,10 +1479,6 @@ def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x12D8, 'V'),
(0x1311, 'X'),
(0x1312, 'V'),
- ]
-
-def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
(0x1316, 'X'),
(0x1318, 'V'),
(0x135B, 'X'),
@@ -1475,12 +1488,12 @@ def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x139A, 'X'),
(0x13A0, 'V'),
(0x13F6, 'X'),
- (0x13F8, 'M', 'Ᏸ'),
- (0x13F9, 'M', 'Ᏹ'),
- (0x13FA, 'M', 'Ᏺ'),
- (0x13FB, 'M', 'Ᏻ'),
- (0x13FC, 'M', 'Ᏼ'),
- (0x13FD, 'M', 'Ᏽ'),
+ (0x13F8, 'M', u'Ᏸ'),
+ (0x13F9, 'M', u'Ᏹ'),
+ (0x13FA, 'M', u'Ᏺ'),
+ (0x13FB, 'M', u'Ᏻ'),
+ (0x13FC, 'M', u'Ᏼ'),
+ (0x13FD, 'M', u'Ᏽ'),
(0x13FE, 'X'),
(0x1400, 'V'),
(0x1680, 'X'),
@@ -1489,8 +1502,10 @@ def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x16A0, 'V'),
(0x16F9, 'X'),
(0x1700, 'V'),
- (0x1716, 'X'),
- (0x171F, 'V'),
+ (0x170D, 'X'),
+ (0x170E, 'V'),
+ (0x1715, 'X'),
+ (0x1720, 'V'),
(0x1737, 'X'),
(0x1740, 'V'),
(0x1754, 'X'),
@@ -1513,7 +1528,6 @@ def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x1807, 'V'),
(0x180B, 'I'),
(0x180E, 'X'),
- (0x180F, 'I'),
(0x1810, 'V'),
(0x181A, 'X'),
(0x1820, 'V'),
@@ -1549,15 +1563,19 @@ def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x1A7F, 'V'),
(0x1A8A, 'X'),
(0x1A90, 'V'),
+ ]
+
+def _seg_15():
+ return [
(0x1A9A, 'X'),
(0x1AA0, 'V'),
(0x1AAE, 'X'),
(0x1AB0, 'V'),
- (0x1ACF, 'X'),
+ (0x1ABF, 'X'),
(0x1B00, 'V'),
- (0x1B4D, 'X'),
+ (0x1B4C, 'X'),
(0x1B50, 'V'),
- (0x1B7F, 'X'),
+ (0x1B7D, 'X'),
(0x1B80, 'V'),
(0x1BF4, 'X'),
(0x1BFC, 'V'),
@@ -1565,1193 +1583,1148 @@ def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x1C3B, 'V'),
(0x1C4A, 'X'),
(0x1C4D, 'V'),
- (0x1C80, 'M', 'в'),
- ]
-
-def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x1C81, 'M', 'д'),
- (0x1C82, 'M', 'о'),
- (0x1C83, 'M', 'с'),
- (0x1C84, 'M', 'т'),
- (0x1C86, 'M', 'ъ'),
- (0x1C87, 'M', 'ѣ'),
- (0x1C88, 'M', 'ꙋ'),
+ (0x1C80, 'M', u'в'),
+ (0x1C81, 'M', u'д'),
+ (0x1C82, 'M', u'о'),
+ (0x1C83, 'M', u'с'),
+ (0x1C84, 'M', u'т'),
+ (0x1C86, 'M', u'ъ'),
+ (0x1C87, 'M', u'ѣ'),
+ (0x1C88, 'M', u'ꙋ'),
(0x1C89, 'X'),
- (0x1C90, 'M', 'ა'),
- (0x1C91, 'M', 'ბ'),
- (0x1C92, 'M', 'გ'),
- (0x1C93, 'M', 'დ'),
- (0x1C94, 'M', 'ე'),
- (0x1C95, 'M', 'ვ'),
- (0x1C96, 'M', 'ზ'),
- (0x1C97, 'M', 'თ'),
- (0x1C98, 'M', 'ი'),
- (0x1C99, 'M', 'კ'),
- (0x1C9A, 'M', 'ლ'),
- (0x1C9B, 'M', 'მ'),
- (0x1C9C, 'M', 'ნ'),
- (0x1C9D, 'M', 'ო'),
- (0x1C9E, 'M', 'პ'),
- (0x1C9F, 'M', 'ჟ'),
- (0x1CA0, 'M', 'რ'),
- (0x1CA1, 'M', 'ს'),
- (0x1CA2, 'M', 'ტ'),
- (0x1CA3, 'M', 'უ'),
- (0x1CA4, 'M', 'ფ'),
- (0x1CA5, 'M', 'ქ'),
- (0x1CA6, 'M', 'ღ'),
- (0x1CA7, 'M', 'ყ'),
- (0x1CA8, 'M', 'შ'),
- (0x1CA9, 'M', 'ჩ'),
- (0x1CAA, 'M', 'ც'),
- (0x1CAB, 'M', 'ძ'),
- (0x1CAC, 'M', 'წ'),
- (0x1CAD, 'M', 'ჭ'),
- (0x1CAE, 'M', 'ხ'),
- (0x1CAF, 'M', 'ჯ'),
- (0x1CB0, 'M', 'ჰ'),
- (0x1CB1, 'M', 'ჱ'),
- (0x1CB2, 'M', 'ჲ'),
- (0x1CB3, 'M', 'ჳ'),
- (0x1CB4, 'M', 'ჴ'),
- (0x1CB5, 'M', 'ჵ'),
- (0x1CB6, 'M', 'ჶ'),
- (0x1CB7, 'M', 'ჷ'),
- (0x1CB8, 'M', 'ჸ'),
- (0x1CB9, 'M', 'ჹ'),
- (0x1CBA, 'M', 'ჺ'),
- (0x1CBB, 'X'),
- (0x1CBD, 'M', 'ჽ'),
- (0x1CBE, 'M', 'ჾ'),
- (0x1CBF, 'M', 'ჿ'),
(0x1CC0, 'V'),
(0x1CC8, 'X'),
(0x1CD0, 'V'),
- (0x1CFB, 'X'),
+ (0x1CFA, 'X'),
(0x1D00, 'V'),
- (0x1D2C, 'M', 'a'),
- (0x1D2D, 'M', 'æ'),
- (0x1D2E, 'M', 'b'),
+ (0x1D2C, 'M', u'a'),
+ (0x1D2D, 'M', u'æ'),
+ (0x1D2E, 'M', u'b'),
(0x1D2F, 'V'),
- (0x1D30, 'M', 'd'),
- (0x1D31, 'M', 'e'),
- (0x1D32, 'M', 'ǝ'),
- (0x1D33, 'M', 'g'),
- (0x1D34, 'M', 'h'),
- (0x1D35, 'M', 'i'),
- (0x1D36, 'M', 'j'),
- (0x1D37, 'M', 'k'),
- (0x1D38, 'M', 'l'),
- (0x1D39, 'M', 'm'),
- (0x1D3A, 'M', 'n'),
+ (0x1D30, 'M', u'd'),
+ (0x1D31, 'M', u'e'),
+ (0x1D32, 'M', u'ǝ'),
+ (0x1D33, 'M', u'g'),
+ (0x1D34, 'M', u'h'),
+ (0x1D35, 'M', u'i'),
+ (0x1D36, 'M', u'j'),
+ (0x1D37, 'M', u'k'),
+ (0x1D38, 'M', u'l'),
+ (0x1D39, 'M', u'm'),
+ (0x1D3A, 'M', u'n'),
(0x1D3B, 'V'),
- (0x1D3C, 'M', 'o'),
- (0x1D3D, 'M', 'ȣ'),
- (0x1D3E, 'M', 'p'),
- (0x1D3F, 'M', 'r'),
- (0x1D40, 'M', 't'),
- (0x1D41, 'M', 'u'),
- (0x1D42, 'M', 'w'),
- (0x1D43, 'M', 'a'),
- (0x1D44, 'M', 'ɐ'),
- (0x1D45, 'M', 'ɑ'),
- (0x1D46, 'M', 'ᴂ'),
- (0x1D47, 'M', 'b'),
- (0x1D48, 'M', 'd'),
- (0x1D49, 'M', 'e'),
- (0x1D4A, 'M', 'ə'),
- (0x1D4B, 'M', 'ɛ'),
- (0x1D4C, 'M', 'ɜ'),
- (0x1D4D, 'M', 'g'),
+ (0x1D3C, 'M', u'o'),
+ (0x1D3D, 'M', u'ȣ'),
+ (0x1D3E, 'M', u'p'),
+ (0x1D3F, 'M', u'r'),
+ (0x1D40, 'M', u't'),
+ (0x1D41, 'M', u'u'),
+ (0x1D42, 'M', u'w'),
+ (0x1D43, 'M', u'a'),
+ (0x1D44, 'M', u'ɐ'),
+ (0x1D45, 'M', u'ɑ'),
+ (0x1D46, 'M', u'ᴂ'),
+ (0x1D47, 'M', u'b'),
+ (0x1D48, 'M', u'd'),
+ (0x1D49, 'M', u'e'),
+ (0x1D4A, 'M', u'ə'),
+ (0x1D4B, 'M', u'ɛ'),
+ (0x1D4C, 'M', u'ɜ'),
+ (0x1D4D, 'M', u'g'),
(0x1D4E, 'V'),
- (0x1D4F, 'M', 'k'),
- (0x1D50, 'M', 'm'),
- (0x1D51, 'M', 'ŋ'),
- (0x1D52, 'M', 'o'),
- (0x1D53, 'M', 'ɔ'),
+ (0x1D4F, 'M', u'k'),
+ (0x1D50, 'M', u'm'),
+ (0x1D51, 'M', u'ŋ'),
+ (0x1D52, 'M', u'o'),
+ (0x1D53, 'M', u'ɔ'),
+ (0x1D54, 'M', u'ᴖ'),
+ (0x1D55, 'M', u'ᴗ'),
+ (0x1D56, 'M', u'p'),
+ (0x1D57, 'M', u't'),
+ (0x1D58, 'M', u'u'),
+ (0x1D59, 'M', u'ᴝ'),
+ (0x1D5A, 'M', u'ɯ'),
+ (0x1D5B, 'M', u'v'),
+ (0x1D5C, 'M', u'ᴥ'),
+ (0x1D5D, 'M', u'β'),
+ (0x1D5E, 'M', u'γ'),
+ (0x1D5F, 'M', u'δ'),
+ (0x1D60, 'M', u'φ'),
+ (0x1D61, 'M', u'χ'),
+ (0x1D62, 'M', u'i'),
+ (0x1D63, 'M', u'r'),
+ (0x1D64, 'M', u'u'),
+ (0x1D65, 'M', u'v'),
+ (0x1D66, 'M', u'β'),
+ (0x1D67, 'M', u'γ'),
+ (0x1D68, 'M', u'ρ'),
+ (0x1D69, 'M', u'φ'),
+ (0x1D6A, 'M', u'χ'),
+ (0x1D6B, 'V'),
+ (0x1D78, 'M', u'н'),
+ (0x1D79, 'V'),
+ (0x1D9B, 'M', u'ɒ'),
+ (0x1D9C, 'M', u'c'),
+ (0x1D9D, 'M', u'ɕ'),
+ (0x1D9E, 'M', u'ð'),
]
-def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_16():
return [
- (0x1D54, 'M', 'ᴖ'),
- (0x1D55, 'M', 'ᴗ'),
- (0x1D56, 'M', 'p'),
- (0x1D57, 'M', 't'),
- (0x1D58, 'M', 'u'),
- (0x1D59, 'M', 'ᴝ'),
- (0x1D5A, 'M', 'ɯ'),
- (0x1D5B, 'M', 'v'),
- (0x1D5C, 'M', 'ᴥ'),
- (0x1D5D, 'M', 'β'),
- (0x1D5E, 'M', 'γ'),
- (0x1D5F, 'M', 'δ'),
- (0x1D60, 'M', 'φ'),
- (0x1D61, 'M', 'χ'),
- (0x1D62, 'M', 'i'),
- (0x1D63, 'M', 'r'),
- (0x1D64, 'M', 'u'),
- (0x1D65, 'M', 'v'),
- (0x1D66, 'M', 'β'),
- (0x1D67, 'M', 'γ'),
- (0x1D68, 'M', 'ρ'),
- (0x1D69, 'M', 'φ'),
- (0x1D6A, 'M', 'χ'),
- (0x1D6B, 'V'),
- (0x1D78, 'M', 'н'),
- (0x1D79, 'V'),
- (0x1D9B, 'M', 'ɒ'),
- (0x1D9C, 'M', 'c'),
- (0x1D9D, 'M', 'ɕ'),
- (0x1D9E, 'M', 'ð'),
- (0x1D9F, 'M', 'ɜ'),
- (0x1DA0, 'M', 'f'),
- (0x1DA1, 'M', 'ɟ'),
- (0x1DA2, 'M', 'ɡ'),
- (0x1DA3, 'M', 'ɥ'),
- (0x1DA4, 'M', 'ɨ'),
- (0x1DA5, 'M', 'ɩ'),
- (0x1DA6, 'M', 'ɪ'),
- (0x1DA7, 'M', 'ᵻ'),
- (0x1DA8, 'M', 'ʝ'),
- (0x1DA9, 'M', 'ɭ'),
- (0x1DAA, 'M', 'ᶅ'),
- (0x1DAB, 'M', 'ʟ'),
- (0x1DAC, 'M', 'ɱ'),
- (0x1DAD, 'M', 'ɰ'),
- (0x1DAE, 'M', 'ɲ'),
- (0x1DAF, 'M', 'ɳ'),
- (0x1DB0, 'M', 'ɴ'),
- (0x1DB1, 'M', 'ɵ'),
- (0x1DB2, 'M', 'ɸ'),
- (0x1DB3, 'M', 'ʂ'),
- (0x1DB4, 'M', 'ʃ'),
- (0x1DB5, 'M', 'ƫ'),
- (0x1DB6, 'M', 'ʉ'),
- (0x1DB7, 'M', 'ʊ'),
- (0x1DB8, 'M', 'ᴜ'),
- (0x1DB9, 'M', 'ʋ'),
- (0x1DBA, 'M', 'ʌ'),
- (0x1DBB, 'M', 'z'),
- (0x1DBC, 'M', 'ʐ'),
- (0x1DBD, 'M', 'ʑ'),
- (0x1DBE, 'M', 'ʒ'),
- (0x1DBF, 'M', 'θ'),
+ (0x1D9F, 'M', u'ɜ'),
+ (0x1DA0, 'M', u'f'),
+ (0x1DA1, 'M', u'ɟ'),
+ (0x1DA2, 'M', u'ɡ'),
+ (0x1DA3, 'M', u'ɥ'),
+ (0x1DA4, 'M', u'ɨ'),
+ (0x1DA5, 'M', u'ɩ'),
+ (0x1DA6, 'M', u'ɪ'),
+ (0x1DA7, 'M', u'ᵻ'),
+ (0x1DA8, 'M', u'ʝ'),
+ (0x1DA9, 'M', u'ɭ'),
+ (0x1DAA, 'M', u'ᶅ'),
+ (0x1DAB, 'M', u'ʟ'),
+ (0x1DAC, 'M', u'ɱ'),
+ (0x1DAD, 'M', u'ɰ'),
+ (0x1DAE, 'M', u'ɲ'),
+ (0x1DAF, 'M', u'ɳ'),
+ (0x1DB0, 'M', u'ɴ'),
+ (0x1DB1, 'M', u'ɵ'),
+ (0x1DB2, 'M', u'ɸ'),
+ (0x1DB3, 'M', u'ʂ'),
+ (0x1DB4, 'M', u'ʃ'),
+ (0x1DB5, 'M', u'ƫ'),
+ (0x1DB6, 'M', u'ʉ'),
+ (0x1DB7, 'M', u'ʊ'),
+ (0x1DB8, 'M', u'ᴜ'),
+ (0x1DB9, 'M', u'ʋ'),
+ (0x1DBA, 'M', u'ʌ'),
+ (0x1DBB, 'M', u'z'),
+ (0x1DBC, 'M', u'ʐ'),
+ (0x1DBD, 'M', u'ʑ'),
+ (0x1DBE, 'M', u'ʒ'),
+ (0x1DBF, 'M', u'θ'),
(0x1DC0, 'V'),
- (0x1E00, 'M', 'ḁ'),
+ (0x1DFA, 'X'),
+ (0x1DFB, 'V'),
+ (0x1E00, 'M', u'ḁ'),
(0x1E01, 'V'),
- (0x1E02, 'M', 'ḃ'),
+ (0x1E02, 'M', u'ḃ'),
(0x1E03, 'V'),
- (0x1E04, 'M', 'ḅ'),
+ (0x1E04, 'M', u'ḅ'),
(0x1E05, 'V'),
- (0x1E06, 'M', 'ḇ'),
+ (0x1E06, 'M', u'ḇ'),
(0x1E07, 'V'),
- (0x1E08, 'M', 'ḉ'),
+ (0x1E08, 'M', u'ḉ'),
(0x1E09, 'V'),
- (0x1E0A, 'M', 'ḋ'),
+ (0x1E0A, 'M', u'ḋ'),
(0x1E0B, 'V'),
- (0x1E0C, 'M', 'ḍ'),
+ (0x1E0C, 'M', u'ḍ'),
(0x1E0D, 'V'),
- (0x1E0E, 'M', 'ḏ'),
+ (0x1E0E, 'M', u'ḏ'),
(0x1E0F, 'V'),
- (0x1E10, 'M', 'ḑ'),
+ (0x1E10, 'M', u'ḑ'),
(0x1E11, 'V'),
- (0x1E12, 'M', 'ḓ'),
+ (0x1E12, 'M', u'ḓ'),
(0x1E13, 'V'),
- (0x1E14, 'M', 'ḕ'),
+ (0x1E14, 'M', u'ḕ'),
(0x1E15, 'V'),
- (0x1E16, 'M', 'ḗ'),
+ (0x1E16, 'M', u'ḗ'),
(0x1E17, 'V'),
- (0x1E18, 'M', 'ḙ'),
+ (0x1E18, 'M', u'ḙ'),
(0x1E19, 'V'),
- (0x1E1A, 'M', 'ḛ'),
+ (0x1E1A, 'M', u'ḛ'),
(0x1E1B, 'V'),
- (0x1E1C, 'M', 'ḝ'),
+ (0x1E1C, 'M', u'ḝ'),
(0x1E1D, 'V'),
- (0x1E1E, 'M', 'ḟ'),
+ (0x1E1E, 'M', u'ḟ'),
(0x1E1F, 'V'),
- (0x1E20, 'M', 'ḡ'),
+ (0x1E20, 'M', u'ḡ'),
(0x1E21, 'V'),
- (0x1E22, 'M', 'ḣ'),
+ (0x1E22, 'M', u'ḣ'),
(0x1E23, 'V'),
- ]
-
-def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x1E24, 'M', 'ḥ'),
+ (0x1E24, 'M', u'ḥ'),
(0x1E25, 'V'),
- (0x1E26, 'M', 'ḧ'),
+ (0x1E26, 'M', u'ḧ'),
(0x1E27, 'V'),
- (0x1E28, 'M', 'ḩ'),
+ (0x1E28, 'M', u'ḩ'),
(0x1E29, 'V'),
- (0x1E2A, 'M', 'ḫ'),
+ (0x1E2A, 'M', u'ḫ'),
(0x1E2B, 'V'),
- (0x1E2C, 'M', 'ḭ'),
+ (0x1E2C, 'M', u'ḭ'),
(0x1E2D, 'V'),
- (0x1E2E, 'M', 'ḯ'),
+ (0x1E2E, 'M', u'ḯ'),
(0x1E2F, 'V'),
- (0x1E30, 'M', 'ḱ'),
+ (0x1E30, 'M', u'ḱ'),
(0x1E31, 'V'),
- (0x1E32, 'M', 'ḳ'),
+ (0x1E32, 'M', u'ḳ'),
(0x1E33, 'V'),
- (0x1E34, 'M', 'ḵ'),
+ (0x1E34, 'M', u'ḵ'),
(0x1E35, 'V'),
- (0x1E36, 'M', 'ḷ'),
+ (0x1E36, 'M', u'ḷ'),
(0x1E37, 'V'),
- (0x1E38, 'M', 'ḹ'),
+ (0x1E38, 'M', u'ḹ'),
(0x1E39, 'V'),
- (0x1E3A, 'M', 'ḻ'),
+ (0x1E3A, 'M', u'ḻ'),
(0x1E3B, 'V'),
- (0x1E3C, 'M', 'ḽ'),
+ (0x1E3C, 'M', u'ḽ'),
(0x1E3D, 'V'),
- (0x1E3E, 'M', 'ḿ'),
+ (0x1E3E, 'M', u'ḿ'),
(0x1E3F, 'V'),
- (0x1E40, 'M', 'ṁ'),
+ ]
+
+def _seg_17():
+ return [
+ (0x1E40, 'M', u'ṁ'),
(0x1E41, 'V'),
- (0x1E42, 'M', 'ṃ'),
+ (0x1E42, 'M', u'ṃ'),
(0x1E43, 'V'),
- (0x1E44, 'M', 'ṅ'),
+ (0x1E44, 'M', u'ṅ'),
(0x1E45, 'V'),
- (0x1E46, 'M', 'ṇ'),
+ (0x1E46, 'M', u'ṇ'),
(0x1E47, 'V'),
- (0x1E48, 'M', 'ṉ'),
+ (0x1E48, 'M', u'ṉ'),
(0x1E49, 'V'),
- (0x1E4A, 'M', 'ṋ'),
+ (0x1E4A, 'M', u'ṋ'),
(0x1E4B, 'V'),
- (0x1E4C, 'M', 'ṍ'),
+ (0x1E4C, 'M', u'ṍ'),
(0x1E4D, 'V'),
- (0x1E4E, 'M', 'ṏ'),
+ (0x1E4E, 'M', u'ṏ'),
(0x1E4F, 'V'),
- (0x1E50, 'M', 'ṑ'),
+ (0x1E50, 'M', u'ṑ'),
(0x1E51, 'V'),
- (0x1E52, 'M', 'ṓ'),
+ (0x1E52, 'M', u'ṓ'),
(0x1E53, 'V'),
- (0x1E54, 'M', 'ṕ'),
+ (0x1E54, 'M', u'ṕ'),
(0x1E55, 'V'),
- (0x1E56, 'M', 'ṗ'),
+ (0x1E56, 'M', u'ṗ'),
(0x1E57, 'V'),
- (0x1E58, 'M', 'ṙ'),
+ (0x1E58, 'M', u'ṙ'),
(0x1E59, 'V'),
- (0x1E5A, 'M', 'ṛ'),
+ (0x1E5A, 'M', u'ṛ'),
(0x1E5B, 'V'),
- (0x1E5C, 'M', 'ṝ'),
+ (0x1E5C, 'M', u'ṝ'),
(0x1E5D, 'V'),
- (0x1E5E, 'M', 'ṟ'),
+ (0x1E5E, 'M', u'ṟ'),
(0x1E5F, 'V'),
- (0x1E60, 'M', 'ṡ'),
+ (0x1E60, 'M', u'ṡ'),
(0x1E61, 'V'),
- (0x1E62, 'M', 'ṣ'),
+ (0x1E62, 'M', u'ṣ'),
(0x1E63, 'V'),
- (0x1E64, 'M', 'ṥ'),
+ (0x1E64, 'M', u'ṥ'),
(0x1E65, 'V'),
- (0x1E66, 'M', 'ṧ'),
+ (0x1E66, 'M', u'ṧ'),
(0x1E67, 'V'),
- (0x1E68, 'M', 'ṩ'),
+ (0x1E68, 'M', u'ṩ'),
(0x1E69, 'V'),
- (0x1E6A, 'M', 'ṫ'),
+ (0x1E6A, 'M', u'ṫ'),
(0x1E6B, 'V'),
- (0x1E6C, 'M', 'ṭ'),
+ (0x1E6C, 'M', u'ṭ'),
(0x1E6D, 'V'),
- (0x1E6E, 'M', 'ṯ'),
+ (0x1E6E, 'M', u'ṯ'),
(0x1E6F, 'V'),
- (0x1E70, 'M', 'ṱ'),
+ (0x1E70, 'M', u'ṱ'),
(0x1E71, 'V'),
- (0x1E72, 'M', 'ṳ'),
+ (0x1E72, 'M', u'ṳ'),
(0x1E73, 'V'),
- (0x1E74, 'M', 'ṵ'),
+ (0x1E74, 'M', u'ṵ'),
(0x1E75, 'V'),
- (0x1E76, 'M', 'ṷ'),
+ (0x1E76, 'M', u'ṷ'),
(0x1E77, 'V'),
- (0x1E78, 'M', 'ṹ'),
+ (0x1E78, 'M', u'ṹ'),
(0x1E79, 'V'),
- (0x1E7A, 'M', 'ṻ'),
+ (0x1E7A, 'M', u'ṻ'),
(0x1E7B, 'V'),
- (0x1E7C, 'M', 'ṽ'),
+ (0x1E7C, 'M', u'ṽ'),
(0x1E7D, 'V'),
- (0x1E7E, 'M', 'ṿ'),
+ (0x1E7E, 'M', u'ṿ'),
(0x1E7F, 'V'),
- (0x1E80, 'M', 'ẁ'),
+ (0x1E80, 'M', u'ẁ'),
(0x1E81, 'V'),
- (0x1E82, 'M', 'ẃ'),
+ (0x1E82, 'M', u'ẃ'),
(0x1E83, 'V'),
- (0x1E84, 'M', 'ẅ'),
+ (0x1E84, 'M', u'ẅ'),
(0x1E85, 'V'),
- (0x1E86, 'M', 'ẇ'),
+ (0x1E86, 'M', u'ẇ'),
(0x1E87, 'V'),
- ]
-
-def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x1E88, 'M', 'ẉ'),
+ (0x1E88, 'M', u'ẉ'),
(0x1E89, 'V'),
- (0x1E8A, 'M', 'ẋ'),
+ (0x1E8A, 'M', u'ẋ'),
(0x1E8B, 'V'),
- (0x1E8C, 'M', 'ẍ'),
+ (0x1E8C, 'M', u'ẍ'),
(0x1E8D, 'V'),
- (0x1E8E, 'M', 'ẏ'),
+ (0x1E8E, 'M', u'ẏ'),
(0x1E8F, 'V'),
- (0x1E90, 'M', 'ẑ'),
+ (0x1E90, 'M', u'ẑ'),
(0x1E91, 'V'),
- (0x1E92, 'M', 'ẓ'),
+ (0x1E92, 'M', u'ẓ'),
(0x1E93, 'V'),
- (0x1E94, 'M', 'ẕ'),
+ (0x1E94, 'M', u'ẕ'),
(0x1E95, 'V'),
- (0x1E9A, 'M', 'aʾ'),
- (0x1E9B, 'M', 'ṡ'),
+ (0x1E9A, 'M', u'aʾ'),
+ (0x1E9B, 'M', u'ṡ'),
(0x1E9C, 'V'),
- (0x1E9E, 'M', 'ss'),
+ (0x1E9E, 'M', u'ss'),
(0x1E9F, 'V'),
- (0x1EA0, 'M', 'ạ'),
+ (0x1EA0, 'M', u'ạ'),
(0x1EA1, 'V'),
- (0x1EA2, 'M', 'ả'),
+ (0x1EA2, 'M', u'ả'),
(0x1EA3, 'V'),
- (0x1EA4, 'M', 'ấ'),
+ (0x1EA4, 'M', u'ấ'),
(0x1EA5, 'V'),
- (0x1EA6, 'M', 'ầ'),
+ (0x1EA6, 'M', u'ầ'),
(0x1EA7, 'V'),
- (0x1EA8, 'M', 'ẩ'),
+ (0x1EA8, 'M', u'ẩ'),
+ ]
+
+def _seg_18():
+ return [
(0x1EA9, 'V'),
- (0x1EAA, 'M', 'ẫ'),
+ (0x1EAA, 'M', u'ẫ'),
(0x1EAB, 'V'),
- (0x1EAC, 'M', 'ậ'),
+ (0x1EAC, 'M', u'ậ'),
(0x1EAD, 'V'),
- (0x1EAE, 'M', 'ắ'),
+ (0x1EAE, 'M', u'ắ'),
(0x1EAF, 'V'),
- (0x1EB0, 'M', 'ằ'),
+ (0x1EB0, 'M', u'ằ'),
(0x1EB1, 'V'),
- (0x1EB2, 'M', 'ẳ'),
+ (0x1EB2, 'M', u'ẳ'),
(0x1EB3, 'V'),
- (0x1EB4, 'M', 'ẵ'),
+ (0x1EB4, 'M', u'ẵ'),
(0x1EB5, 'V'),
- (0x1EB6, 'M', 'ặ'),
+ (0x1EB6, 'M', u'ặ'),
(0x1EB7, 'V'),
- (0x1EB8, 'M', 'ẹ'),
+ (0x1EB8, 'M', u'ẹ'),
(0x1EB9, 'V'),
- (0x1EBA, 'M', 'ẻ'),
+ (0x1EBA, 'M', u'ẻ'),
(0x1EBB, 'V'),
- (0x1EBC, 'M', 'ẽ'),
+ (0x1EBC, 'M', u'ẽ'),
(0x1EBD, 'V'),
- (0x1EBE, 'M', 'ế'),
+ (0x1EBE, 'M', u'ế'),
(0x1EBF, 'V'),
- (0x1EC0, 'M', 'ề'),
+ (0x1EC0, 'M', u'ề'),
(0x1EC1, 'V'),
- (0x1EC2, 'M', 'ể'),
+ (0x1EC2, 'M', u'ể'),
(0x1EC3, 'V'),
- (0x1EC4, 'M', 'ễ'),
+ (0x1EC4, 'M', u'ễ'),
(0x1EC5, 'V'),
- (0x1EC6, 'M', 'ệ'),
+ (0x1EC6, 'M', u'ệ'),
(0x1EC7, 'V'),
- (0x1EC8, 'M', 'ỉ'),
+ (0x1EC8, 'M', u'ỉ'),
(0x1EC9, 'V'),
- (0x1ECA, 'M', 'ị'),
+ (0x1ECA, 'M', u'ị'),
(0x1ECB, 'V'),
- (0x1ECC, 'M', 'ọ'),
+ (0x1ECC, 'M', u'ọ'),
(0x1ECD, 'V'),
- (0x1ECE, 'M', 'ỏ'),
+ (0x1ECE, 'M', u'ỏ'),
(0x1ECF, 'V'),
- (0x1ED0, 'M', 'ố'),
+ (0x1ED0, 'M', u'ố'),
(0x1ED1, 'V'),
- (0x1ED2, 'M', 'ồ'),
+ (0x1ED2, 'M', u'ồ'),
(0x1ED3, 'V'),
- (0x1ED4, 'M', 'ổ'),
+ (0x1ED4, 'M', u'ổ'),
(0x1ED5, 'V'),
- (0x1ED6, 'M', 'ỗ'),
+ (0x1ED6, 'M', u'ỗ'),
(0x1ED7, 'V'),
- (0x1ED8, 'M', 'ộ'),
+ (0x1ED8, 'M', u'ộ'),
(0x1ED9, 'V'),
- (0x1EDA, 'M', 'ớ'),
+ (0x1EDA, 'M', u'ớ'),
(0x1EDB, 'V'),
- (0x1EDC, 'M', 'ờ'),
+ (0x1EDC, 'M', u'ờ'),
(0x1EDD, 'V'),
- (0x1EDE, 'M', 'ở'),
+ (0x1EDE, 'M', u'ở'),
(0x1EDF, 'V'),
- (0x1EE0, 'M', 'ỡ'),
+ (0x1EE0, 'M', u'ỡ'),
(0x1EE1, 'V'),
- (0x1EE2, 'M', 'ợ'),
+ (0x1EE2, 'M', u'ợ'),
(0x1EE3, 'V'),
- (0x1EE4, 'M', 'ụ'),
+ (0x1EE4, 'M', u'ụ'),
(0x1EE5, 'V'),
- (0x1EE6, 'M', 'ủ'),
+ (0x1EE6, 'M', u'ủ'),
(0x1EE7, 'V'),
- (0x1EE8, 'M', 'ứ'),
+ (0x1EE8, 'M', u'ứ'),
(0x1EE9, 'V'),
- (0x1EEA, 'M', 'ừ'),
+ (0x1EEA, 'M', u'ừ'),
(0x1EEB, 'V'),
- (0x1EEC, 'M', 'ử'),
+ (0x1EEC, 'M', u'ử'),
(0x1EED, 'V'),
- (0x1EEE, 'M', 'ữ'),
+ (0x1EEE, 'M', u'ữ'),
(0x1EEF, 'V'),
- (0x1EF0, 'M', 'ự'),
- ]
-
-def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
+ (0x1EF0, 'M', u'ự'),
(0x1EF1, 'V'),
- (0x1EF2, 'M', 'ỳ'),
+ (0x1EF2, 'M', u'ỳ'),
(0x1EF3, 'V'),
- (0x1EF4, 'M', 'ỵ'),
+ (0x1EF4, 'M', u'ỵ'),
(0x1EF5, 'V'),
- (0x1EF6, 'M', 'ỷ'),
+ (0x1EF6, 'M', u'ỷ'),
(0x1EF7, 'V'),
- (0x1EF8, 'M', 'ỹ'),
+ (0x1EF8, 'M', u'ỹ'),
(0x1EF9, 'V'),
- (0x1EFA, 'M', 'ỻ'),
+ (0x1EFA, 'M', u'ỻ'),
(0x1EFB, 'V'),
- (0x1EFC, 'M', 'ỽ'),
+ (0x1EFC, 'M', u'ỽ'),
(0x1EFD, 'V'),
- (0x1EFE, 'M', 'ỿ'),
+ (0x1EFE, 'M', u'ỿ'),
(0x1EFF, 'V'),
- (0x1F08, 'M', 'ἀ'),
- (0x1F09, 'M', 'ἁ'),
- (0x1F0A, 'M', 'ἂ'),
- (0x1F0B, 'M', 'ἃ'),
- (0x1F0C, 'M', 'ἄ'),
- (0x1F0D, 'M', 'ἅ'),
- (0x1F0E, 'M', 'ἆ'),
- (0x1F0F, 'M', 'ἇ'),
+ (0x1F08, 'M', u'ἀ'),
+ (0x1F09, 'M', u'ἁ'),
+ (0x1F0A, 'M', u'ἂ'),
+ (0x1F0B, 'M', u'ἃ'),
+ (0x1F0C, 'M', u'ἄ'),
+ (0x1F0D, 'M', u'ἅ'),
+ (0x1F0E, 'M', u'ἆ'),
+ (0x1F0F, 'M', u'ἇ'),
(0x1F10, 'V'),
(0x1F16, 'X'),
- (0x1F18, 'M', 'ἐ'),
- (0x1F19, 'M', 'ἑ'),
- (0x1F1A, 'M', 'ἒ'),
- (0x1F1B, 'M', 'ἓ'),
- (0x1F1C, 'M', 'ἔ'),
- (0x1F1D, 'M', 'ἕ'),
+ (0x1F18, 'M', u'ἐ'),
+ (0x1F19, 'M', u'ἑ'),
+ (0x1F1A, 'M', u'ἒ'),
+ ]
+
+def _seg_19():
+ return [
+ (0x1F1B, 'M', u'ἓ'),
+ (0x1F1C, 'M', u'ἔ'),
+ (0x1F1D, 'M', u'ἕ'),
(0x1F1E, 'X'),
(0x1F20, 'V'),
- (0x1F28, 'M', 'ἠ'),
- (0x1F29, 'M', 'ἡ'),
- (0x1F2A, 'M', 'ἢ'),
- (0x1F2B, 'M', 'ἣ'),
- (0x1F2C, 'M', 'ἤ'),
- (0x1F2D, 'M', 'ἥ'),
- (0x1F2E, 'M', 'ἦ'),
- (0x1F2F, 'M', 'ἧ'),
+ (0x1F28, 'M', u'ἠ'),
+ (0x1F29, 'M', u'ἡ'),
+ (0x1F2A, 'M', u'ἢ'),
+ (0x1F2B, 'M', u'ἣ'),
+ (0x1F2C, 'M', u'ἤ'),
+ (0x1F2D, 'M', u'ἥ'),
+ (0x1F2E, 'M', u'ἦ'),
+ (0x1F2F, 'M', u'ἧ'),
(0x1F30, 'V'),
- (0x1F38, 'M', 'ἰ'),
- (0x1F39, 'M', 'ἱ'),
- (0x1F3A, 'M', 'ἲ'),
- (0x1F3B, 'M', 'ἳ'),
- (0x1F3C, 'M', 'ἴ'),
- (0x1F3D, 'M', 'ἵ'),
- (0x1F3E, 'M', 'ἶ'),
- (0x1F3F, 'M', 'ἷ'),
+ (0x1F38, 'M', u'ἰ'),
+ (0x1F39, 'M', u'ἱ'),
+ (0x1F3A, 'M', u'ἲ'),
+ (0x1F3B, 'M', u'ἳ'),
+ (0x1F3C, 'M', u'ἴ'),
+ (0x1F3D, 'M', u'ἵ'),
+ (0x1F3E, 'M', u'ἶ'),
+ (0x1F3F, 'M', u'ἷ'),
(0x1F40, 'V'),
(0x1F46, 'X'),
- (0x1F48, 'M', 'ὀ'),
- (0x1F49, 'M', 'ὁ'),
- (0x1F4A, 'M', 'ὂ'),
- (0x1F4B, 'M', 'ὃ'),
- (0x1F4C, 'M', 'ὄ'),
- (0x1F4D, 'M', 'ὅ'),
+ (0x1F48, 'M', u'ὀ'),
+ (0x1F49, 'M', u'ὁ'),
+ (0x1F4A, 'M', u'ὂ'),
+ (0x1F4B, 'M', u'ὃ'),
+ (0x1F4C, 'M', u'ὄ'),
+ (0x1F4D, 'M', u'ὅ'),
(0x1F4E, 'X'),
(0x1F50, 'V'),
(0x1F58, 'X'),
- (0x1F59, 'M', 'ὑ'),
+ (0x1F59, 'M', u'ὑ'),
(0x1F5A, 'X'),
- (0x1F5B, 'M', 'ὓ'),
+ (0x1F5B, 'M', u'ὓ'),
(0x1F5C, 'X'),
- (0x1F5D, 'M', 'ὕ'),
+ (0x1F5D, 'M', u'ὕ'),
(0x1F5E, 'X'),
- (0x1F5F, 'M', 'ὗ'),
+ (0x1F5F, 'M', u'ὗ'),
(0x1F60, 'V'),
- (0x1F68, 'M', 'ὠ'),
- (0x1F69, 'M', 'ὡ'),
- (0x1F6A, 'M', 'ὢ'),
- (0x1F6B, 'M', 'ὣ'),
- (0x1F6C, 'M', 'ὤ'),
- (0x1F6D, 'M', 'ὥ'),
- (0x1F6E, 'M', 'ὦ'),
- (0x1F6F, 'M', 'ὧ'),
+ (0x1F68, 'M', u'ὠ'),
+ (0x1F69, 'M', u'ὡ'),
+ (0x1F6A, 'M', u'ὢ'),
+ (0x1F6B, 'M', u'ὣ'),
+ (0x1F6C, 'M', u'ὤ'),
+ (0x1F6D, 'M', u'ὥ'),
+ (0x1F6E, 'M', u'ὦ'),
+ (0x1F6F, 'M', u'ὧ'),
(0x1F70, 'V'),
- (0x1F71, 'M', 'ά'),
+ (0x1F71, 'M', u'ά'),
(0x1F72, 'V'),
- (0x1F73, 'M', 'έ'),
+ (0x1F73, 'M', u'έ'),
(0x1F74, 'V'),
- (0x1F75, 'M', 'ή'),
+ (0x1F75, 'M', u'ή'),
(0x1F76, 'V'),
- (0x1F77, 'M', 'ί'),
+ (0x1F77, 'M', u'ί'),
(0x1F78, 'V'),
- (0x1F79, 'M', 'ό'),
+ (0x1F79, 'M', u'ό'),
(0x1F7A, 'V'),
- (0x1F7B, 'M', 'ύ'),
+ (0x1F7B, 'M', u'ύ'),
(0x1F7C, 'V'),
- (0x1F7D, 'M', 'ώ'),
+ (0x1F7D, 'M', u'ώ'),
(0x1F7E, 'X'),
- (0x1F80, 'M', 'ἀι'),
- (0x1F81, 'M', 'ἁι'),
- (0x1F82, 'M', 'ἂι'),
- (0x1F83, 'M', 'ἃι'),
- (0x1F84, 'M', 'ἄι'),
- (0x1F85, 'M', 'ἅι'),
- (0x1F86, 'M', 'ἆι'),
- (0x1F87, 'M', 'ἇι'),
+ (0x1F80, 'M', u'ἀι'),
+ (0x1F81, 'M', u'ἁι'),
+ (0x1F82, 'M', u'ἂι'),
+ (0x1F83, 'M', u'ἃι'),
+ (0x1F84, 'M', u'ἄι'),
+ (0x1F85, 'M', u'ἅι'),
+ (0x1F86, 'M', u'ἆι'),
+ (0x1F87, 'M', u'ἇι'),
+ (0x1F88, 'M', u'ἀι'),
+ (0x1F89, 'M', u'ἁι'),
+ (0x1F8A, 'M', u'ἂι'),
+ (0x1F8B, 'M', u'ἃι'),
+ (0x1F8C, 'M', u'ἄι'),
+ (0x1F8D, 'M', u'ἅι'),
+ (0x1F8E, 'M', u'ἆι'),
+ (0x1F8F, 'M', u'ἇι'),
+ (0x1F90, 'M', u'ἠι'),
+ (0x1F91, 'M', u'ἡι'),
+ (0x1F92, 'M', u'ἢι'),
+ (0x1F93, 'M', u'ἣι'),
+ (0x1F94, 'M', u'ἤι'),
+ (0x1F95, 'M', u'ἥι'),
+ (0x1F96, 'M', u'ἦι'),
+ (0x1F97, 'M', u'ἧι'),
+ (0x1F98, 'M', u'ἠι'),
+ (0x1F99, 'M', u'ἡι'),
+ (0x1F9A, 'M', u'ἢι'),
+ (0x1F9B, 'M', u'ἣι'),
+ (0x1F9C, 'M', u'ἤι'),
+ (0x1F9D, 'M', u'ἥι'),
+ (0x1F9E, 'M', u'ἦι'),
+ (0x1F9F, 'M', u'ἧι'),
+ (0x1FA0, 'M', u'ὠι'),
+ (0x1FA1, 'M', u'ὡι'),
+ (0x1FA2, 'M', u'ὢι'),
+ (0x1FA3, 'M', u'ὣι'),
]
-def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_20():
return [
- (0x1F88, 'M', 'ἀι'),
- (0x1F89, 'M', 'ἁι'),
- (0x1F8A, 'M', 'ἂι'),
- (0x1F8B, 'M', 'ἃι'),
- (0x1F8C, 'M', 'ἄι'),
- (0x1F8D, 'M', 'ἅι'),
- (0x1F8E, 'M', 'ἆι'),
- (0x1F8F, 'M', 'ἇι'),
- (0x1F90, 'M', 'ἠι'),
- (0x1F91, 'M', 'ἡι'),
- (0x1F92, 'M', 'ἢι'),
- (0x1F93, 'M', 'ἣι'),
- (0x1F94, 'M', 'ἤι'),
- (0x1F95, 'M', 'ἥι'),
- (0x1F96, 'M', 'ἦι'),
- (0x1F97, 'M', 'ἧι'),
- (0x1F98, 'M', 'ἠι'),
- (0x1F99, 'M', 'ἡι'),
- (0x1F9A, 'M', 'ἢι'),
- (0x1F9B, 'M', 'ἣι'),
- (0x1F9C, 'M', 'ἤι'),
- (0x1F9D, 'M', 'ἥι'),
- (0x1F9E, 'M', 'ἦι'),
- (0x1F9F, 'M', 'ἧι'),
- (0x1FA0, 'M', 'ὠι'),
- (0x1FA1, 'M', 'ὡι'),
- (0x1FA2, 'M', 'ὢι'),
- (0x1FA3, 'M', 'ὣι'),
- (0x1FA4, 'M', 'ὤι'),
- (0x1FA5, 'M', 'ὥι'),
- (0x1FA6, 'M', 'ὦι'),
- (0x1FA7, 'M', 'ὧι'),
- (0x1FA8, 'M', 'ὠι'),
- (0x1FA9, 'M', 'ὡι'),
- (0x1FAA, 'M', 'ὢι'),
- (0x1FAB, 'M', 'ὣι'),
- (0x1FAC, 'M', 'ὤι'),
- (0x1FAD, 'M', 'ὥι'),
- (0x1FAE, 'M', 'ὦι'),
- (0x1FAF, 'M', 'ὧι'),
+ (0x1FA4, 'M', u'ὤι'),
+ (0x1FA5, 'M', u'ὥι'),
+ (0x1FA6, 'M', u'ὦι'),
+ (0x1FA7, 'M', u'ὧι'),
+ (0x1FA8, 'M', u'ὠι'),
+ (0x1FA9, 'M', u'ὡι'),
+ (0x1FAA, 'M', u'ὢι'),
+ (0x1FAB, 'M', u'ὣι'),
+ (0x1FAC, 'M', u'ὤι'),
+ (0x1FAD, 'M', u'ὥι'),
+ (0x1FAE, 'M', u'ὦι'),
+ (0x1FAF, 'M', u'ὧι'),
(0x1FB0, 'V'),
- (0x1FB2, 'M', 'ὰι'),
- (0x1FB3, 'M', 'αι'),
- (0x1FB4, 'M', 'άι'),
+ (0x1FB2, 'M', u'ὰι'),
+ (0x1FB3, 'M', u'αι'),
+ (0x1FB4, 'M', u'άι'),
(0x1FB5, 'X'),
(0x1FB6, 'V'),
- (0x1FB7, 'M', 'ᾶι'),
- (0x1FB8, 'M', 'ᾰ'),
- (0x1FB9, 'M', 'ᾱ'),
- (0x1FBA, 'M', 'ὰ'),
- (0x1FBB, 'M', 'ά'),
- (0x1FBC, 'M', 'αι'),
- (0x1FBD, '3', ' ̓'),
- (0x1FBE, 'M', 'ι'),
- (0x1FBF, '3', ' ̓'),
- (0x1FC0, '3', ' ͂'),
- (0x1FC1, '3', ' ̈͂'),
- (0x1FC2, 'M', 'ὴι'),
- (0x1FC3, 'M', 'ηι'),
- (0x1FC4, 'M', 'ήι'),
+ (0x1FB7, 'M', u'ᾶι'),
+ (0x1FB8, 'M', u'ᾰ'),
+ (0x1FB9, 'M', u'ᾱ'),
+ (0x1FBA, 'M', u'ὰ'),
+ (0x1FBB, 'M', u'ά'),
+ (0x1FBC, 'M', u'αι'),
+ (0x1FBD, '3', u' ̓'),
+ (0x1FBE, 'M', u'ι'),
+ (0x1FBF, '3', u' ̓'),
+ (0x1FC0, '3', u' ͂'),
+ (0x1FC1, '3', u' ̈͂'),
+ (0x1FC2, 'M', u'ὴι'),
+ (0x1FC3, 'M', u'ηι'),
+ (0x1FC4, 'M', u'ήι'),
(0x1FC5, 'X'),
(0x1FC6, 'V'),
- (0x1FC7, 'M', 'ῆι'),
- (0x1FC8, 'M', 'ὲ'),
- (0x1FC9, 'M', 'έ'),
- (0x1FCA, 'M', 'ὴ'),
- (0x1FCB, 'M', 'ή'),
- (0x1FCC, 'M', 'ηι'),
- (0x1FCD, '3', ' ̓̀'),
- (0x1FCE, '3', ' ̓́'),
- (0x1FCF, '3', ' ̓͂'),
+ (0x1FC7, 'M', u'ῆι'),
+ (0x1FC8, 'M', u'ὲ'),
+ (0x1FC9, 'M', u'έ'),
+ (0x1FCA, 'M', u'ὴ'),
+ (0x1FCB, 'M', u'ή'),
+ (0x1FCC, 'M', u'ηι'),
+ (0x1FCD, '3', u' ̓̀'),
+ (0x1FCE, '3', u' ̓́'),
+ (0x1FCF, '3', u' ̓͂'),
(0x1FD0, 'V'),
- (0x1FD3, 'M', 'ΐ'),
+ (0x1FD3, 'M', u'ΐ'),
(0x1FD4, 'X'),
(0x1FD6, 'V'),
- (0x1FD8, 'M', 'ῐ'),
- (0x1FD9, 'M', 'ῑ'),
- (0x1FDA, 'M', 'ὶ'),
- (0x1FDB, 'M', 'ί'),
+ (0x1FD8, 'M', u'ῐ'),
+ (0x1FD9, 'M', u'ῑ'),
+ (0x1FDA, 'M', u'ὶ'),
+ (0x1FDB, 'M', u'ί'),
(0x1FDC, 'X'),
- (0x1FDD, '3', ' ̔̀'),
- (0x1FDE, '3', ' ̔́'),
- (0x1FDF, '3', ' ̔͂'),
+ (0x1FDD, '3', u' ̔̀'),
+ (0x1FDE, '3', u' ̔́'),
+ (0x1FDF, '3', u' ̔͂'),
(0x1FE0, 'V'),
- (0x1FE3, 'M', 'ΰ'),
+ (0x1FE3, 'M', u'ΰ'),
(0x1FE4, 'V'),
- (0x1FE8, 'M', 'ῠ'),
- (0x1FE9, 'M', 'ῡ'),
- (0x1FEA, 'M', 'ὺ'),
- (0x1FEB, 'M', 'ύ'),
- (0x1FEC, 'M', 'ῥ'),
- (0x1FED, '3', ' ̈̀'),
- (0x1FEE, '3', ' ̈́'),
- (0x1FEF, '3', '`'),
+ (0x1FE8, 'M', u'ῠ'),
+ (0x1FE9, 'M', u'ῡ'),
+ (0x1FEA, 'M', u'ὺ'),
+ (0x1FEB, 'M', u'ύ'),
+ (0x1FEC, 'M', u'ῥ'),
+ (0x1FED, '3', u' ̈̀'),
+ (0x1FEE, '3', u' ̈́'),
+ (0x1FEF, '3', u'`'),
(0x1FF0, 'X'),
- (0x1FF2, 'M', 'ὼι'),
- (0x1FF3, 'M', 'ωι'),
- (0x1FF4, 'M', 'ώι'),
+ (0x1FF2, 'M', u'ὼι'),
+ (0x1FF3, 'M', u'ωι'),
+ (0x1FF4, 'M', u'ώι'),
(0x1FF5, 'X'),
(0x1FF6, 'V'),
- ]
-
-def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x1FF7, 'M', 'ῶι'),
- (0x1FF8, 'M', 'ὸ'),
- (0x1FF9, 'M', 'ό'),
- (0x1FFA, 'M', 'ὼ'),
- (0x1FFB, 'M', 'ώ'),
- (0x1FFC, 'M', 'ωι'),
- (0x1FFD, '3', ' ́'),
- (0x1FFE, '3', ' ̔'),
+ (0x1FF7, 'M', u'ῶι'),
+ (0x1FF8, 'M', u'ὸ'),
+ (0x1FF9, 'M', u'ό'),
+ (0x1FFA, 'M', u'ὼ'),
+ (0x1FFB, 'M', u'ώ'),
+ (0x1FFC, 'M', u'ωι'),
+ (0x1FFD, '3', u' ́'),
+ (0x1FFE, '3', u' ̔'),
(0x1FFF, 'X'),
- (0x2000, '3', ' '),
+ (0x2000, '3', u' '),
(0x200B, 'I'),
- (0x200C, 'D', ''),
+ (0x200C, 'D', u''),
(0x200E, 'X'),
(0x2010, 'V'),
- (0x2011, 'M', '‐'),
+ (0x2011, 'M', u'‐'),
(0x2012, 'V'),
- (0x2017, '3', ' ̳'),
+ (0x2017, '3', u' ̳'),
(0x2018, 'V'),
(0x2024, 'X'),
(0x2027, 'V'),
(0x2028, 'X'),
- (0x202F, '3', ' '),
+ (0x202F, '3', u' '),
(0x2030, 'V'),
- (0x2033, 'M', '′′'),
- (0x2034, 'M', '′′′'),
+ (0x2033, 'M', u'′′'),
+ (0x2034, 'M', u'′′′'),
(0x2035, 'V'),
- (0x2036, 'M', '‵‵'),
- (0x2037, 'M', '‵‵‵'),
+ (0x2036, 'M', u'‵‵'),
+ (0x2037, 'M', u'‵‵‵'),
+ ]
+
+def _seg_21():
+ return [
(0x2038, 'V'),
- (0x203C, '3', '!!'),
+ (0x203C, '3', u'!!'),
(0x203D, 'V'),
- (0x203E, '3', ' ̅'),
+ (0x203E, '3', u' ̅'),
(0x203F, 'V'),
- (0x2047, '3', '??'),
- (0x2048, '3', '?!'),
- (0x2049, '3', '!?'),
+ (0x2047, '3', u'??'),
+ (0x2048, '3', u'?!'),
+ (0x2049, '3', u'!?'),
(0x204A, 'V'),
- (0x2057, 'M', '′′′′'),
+ (0x2057, 'M', u'′′′′'),
(0x2058, 'V'),
- (0x205F, '3', ' '),
+ (0x205F, '3', u' '),
(0x2060, 'I'),
(0x2061, 'X'),
(0x2064, 'I'),
(0x2065, 'X'),
- (0x2070, 'M', '0'),
- (0x2071, 'M', 'i'),
+ (0x2070, 'M', u'0'),
+ (0x2071, 'M', u'i'),
(0x2072, 'X'),
- (0x2074, 'M', '4'),
- (0x2075, 'M', '5'),
- (0x2076, 'M', '6'),
- (0x2077, 'M', '7'),
- (0x2078, 'M', '8'),
- (0x2079, 'M', '9'),
- (0x207A, '3', '+'),
- (0x207B, 'M', '−'),
- (0x207C, '3', '='),
- (0x207D, '3', '('),
- (0x207E, '3', ')'),
- (0x207F, 'M', 'n'),
- (0x2080, 'M', '0'),
- (0x2081, 'M', '1'),
- (0x2082, 'M', '2'),
- (0x2083, 'M', '3'),
- (0x2084, 'M', '4'),
- (0x2085, 'M', '5'),
- (0x2086, 'M', '6'),
- (0x2087, 'M', '7'),
- (0x2088, 'M', '8'),
- (0x2089, 'M', '9'),
- (0x208A, '3', '+'),
- (0x208B, 'M', '−'),
- (0x208C, '3', '='),
- (0x208D, '3', '('),
- (0x208E, '3', ')'),
+ (0x2074, 'M', u'4'),
+ (0x2075, 'M', u'5'),
+ (0x2076, 'M', u'6'),
+ (0x2077, 'M', u'7'),
+ (0x2078, 'M', u'8'),
+ (0x2079, 'M', u'9'),
+ (0x207A, '3', u'+'),
+ (0x207B, 'M', u'−'),
+ (0x207C, '3', u'='),
+ (0x207D, '3', u'('),
+ (0x207E, '3', u')'),
+ (0x207F, 'M', u'n'),
+ (0x2080, 'M', u'0'),
+ (0x2081, 'M', u'1'),
+ (0x2082, 'M', u'2'),
+ (0x2083, 'M', u'3'),
+ (0x2084, 'M', u'4'),
+ (0x2085, 'M', u'5'),
+ (0x2086, 'M', u'6'),
+ (0x2087, 'M', u'7'),
+ (0x2088, 'M', u'8'),
+ (0x2089, 'M', u'9'),
+ (0x208A, '3', u'+'),
+ (0x208B, 'M', u'−'),
+ (0x208C, '3', u'='),
+ (0x208D, '3', u'('),
+ (0x208E, '3', u')'),
(0x208F, 'X'),
- (0x2090, 'M', 'a'),
- (0x2091, 'M', 'e'),
- (0x2092, 'M', 'o'),
- (0x2093, 'M', 'x'),
- (0x2094, 'M', 'ə'),
- (0x2095, 'M', 'h'),
- (0x2096, 'M', 'k'),
- (0x2097, 'M', 'l'),
- (0x2098, 'M', 'm'),
- (0x2099, 'M', 'n'),
- (0x209A, 'M', 'p'),
- (0x209B, 'M', 's'),
- (0x209C, 'M', 't'),
+ (0x2090, 'M', u'a'),
+ (0x2091, 'M', u'e'),
+ (0x2092, 'M', u'o'),
+ (0x2093, 'M', u'x'),
+ (0x2094, 'M', u'ə'),
+ (0x2095, 'M', u'h'),
+ (0x2096, 'M', u'k'),
+ (0x2097, 'M', u'l'),
+ (0x2098, 'M', u'm'),
+ (0x2099, 'M', u'n'),
+ (0x209A, 'M', u'p'),
+ (0x209B, 'M', u's'),
+ (0x209C, 'M', u't'),
(0x209D, 'X'),
(0x20A0, 'V'),
- (0x20A8, 'M', 'rs'),
+ (0x20A8, 'M', u'rs'),
(0x20A9, 'V'),
- (0x20C1, 'X'),
+ (0x20C0, 'X'),
(0x20D0, 'V'),
(0x20F1, 'X'),
- (0x2100, '3', 'a/c'),
- (0x2101, '3', 'a/s'),
- (0x2102, 'M', 'c'),
- (0x2103, 'M', '°c'),
+ (0x2100, '3', u'a/c'),
+ (0x2101, '3', u'a/s'),
+ (0x2102, 'M', u'c'),
+ (0x2103, 'M', u'°c'),
(0x2104, 'V'),
- ]
-
-def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x2105, '3', 'c/o'),
- (0x2106, '3', 'c/u'),
- (0x2107, 'M', 'ɛ'),
+ (0x2105, '3', u'c/o'),
+ (0x2106, '3', u'c/u'),
+ (0x2107, 'M', u'ɛ'),
(0x2108, 'V'),
- (0x2109, 'M', '°f'),
- (0x210A, 'M', 'g'),
- (0x210B, 'M', 'h'),
- (0x210F, 'M', 'ħ'),
- (0x2110, 'M', 'i'),
- (0x2112, 'M', 'l'),
+ (0x2109, 'M', u'°f'),
+ (0x210A, 'M', u'g'),
+ (0x210B, 'M', u'h'),
+ (0x210F, 'M', u'ħ'),
+ (0x2110, 'M', u'i'),
+ (0x2112, 'M', u'l'),
(0x2114, 'V'),
- (0x2115, 'M', 'n'),
- (0x2116, 'M', 'no'),
+ (0x2115, 'M', u'n'),
+ (0x2116, 'M', u'no'),
(0x2117, 'V'),
- (0x2119, 'M', 'p'),
- (0x211A, 'M', 'q'),
- (0x211B, 'M', 'r'),
+ (0x2119, 'M', u'p'),
+ (0x211A, 'M', u'q'),
+ (0x211B, 'M', u'r'),
(0x211E, 'V'),
- (0x2120, 'M', 'sm'),
- (0x2121, 'M', 'tel'),
- (0x2122, 'M', 'tm'),
+ (0x2120, 'M', u'sm'),
+ (0x2121, 'M', u'tel'),
+ (0x2122, 'M', u'tm'),
(0x2123, 'V'),
- (0x2124, 'M', 'z'),
+ (0x2124, 'M', u'z'),
(0x2125, 'V'),
- (0x2126, 'M', 'ω'),
+ (0x2126, 'M', u'ω'),
(0x2127, 'V'),
- (0x2128, 'M', 'z'),
+ (0x2128, 'M', u'z'),
(0x2129, 'V'),
- (0x212A, 'M', 'k'),
- (0x212B, 'M', 'å'),
- (0x212C, 'M', 'b'),
- (0x212D, 'M', 'c'),
+ ]
+
+def _seg_22():
+ return [
+ (0x212A, 'M', u'k'),
+ (0x212B, 'M', u'å'),
+ (0x212C, 'M', u'b'),
+ (0x212D, 'M', u'c'),
(0x212E, 'V'),
- (0x212F, 'M', 'e'),
- (0x2131, 'M', 'f'),
+ (0x212F, 'M', u'e'),
+ (0x2131, 'M', u'f'),
(0x2132, 'X'),
- (0x2133, 'M', 'm'),
- (0x2134, 'M', 'o'),
- (0x2135, 'M', 'א'),
- (0x2136, 'M', 'ב'),
- (0x2137, 'M', 'ג'),
- (0x2138, 'M', 'ד'),
- (0x2139, 'M', 'i'),
+ (0x2133, 'M', u'm'),
+ (0x2134, 'M', u'o'),
+ (0x2135, 'M', u'א'),
+ (0x2136, 'M', u'ב'),
+ (0x2137, 'M', u'ג'),
+ (0x2138, 'M', u'ד'),
+ (0x2139, 'M', u'i'),
(0x213A, 'V'),
- (0x213B, 'M', 'fax'),
- (0x213C, 'M', 'π'),
- (0x213D, 'M', 'γ'),
- (0x213F, 'M', 'π'),
- (0x2140, 'M', '∑'),
+ (0x213B, 'M', u'fax'),
+ (0x213C, 'M', u'π'),
+ (0x213D, 'M', u'γ'),
+ (0x213F, 'M', u'π'),
+ (0x2140, 'M', u'∑'),
(0x2141, 'V'),
- (0x2145, 'M', 'd'),
- (0x2147, 'M', 'e'),
- (0x2148, 'M', 'i'),
- (0x2149, 'M', 'j'),
+ (0x2145, 'M', u'd'),
+ (0x2147, 'M', u'e'),
+ (0x2148, 'M', u'i'),
+ (0x2149, 'M', u'j'),
(0x214A, 'V'),
- (0x2150, 'M', '1⁄7'),
- (0x2151, 'M', '1⁄9'),
- (0x2152, 'M', '1⁄10'),
- (0x2153, 'M', '1⁄3'),
- (0x2154, 'M', '2⁄3'),
- (0x2155, 'M', '1⁄5'),
- (0x2156, 'M', '2⁄5'),
- (0x2157, 'M', '3⁄5'),
- (0x2158, 'M', '4⁄5'),
- (0x2159, 'M', '1⁄6'),
- (0x215A, 'M', '5⁄6'),
- (0x215B, 'M', '1⁄8'),
- (0x215C, 'M', '3⁄8'),
- (0x215D, 'M', '5⁄8'),
- (0x215E, 'M', '7⁄8'),
- (0x215F, 'M', '1⁄'),
- (0x2160, 'M', 'i'),
- (0x2161, 'M', 'ii'),
- (0x2162, 'M', 'iii'),
- (0x2163, 'M', 'iv'),
- (0x2164, 'M', 'v'),
- (0x2165, 'M', 'vi'),
- (0x2166, 'M', 'vii'),
- (0x2167, 'M', 'viii'),
- (0x2168, 'M', 'ix'),
- (0x2169, 'M', 'x'),
- (0x216A, 'M', 'xi'),
- (0x216B, 'M', 'xii'),
- (0x216C, 'M', 'l'),
- (0x216D, 'M', 'c'),
- (0x216E, 'M', 'd'),
- (0x216F, 'M', 'm'),
- (0x2170, 'M', 'i'),
- (0x2171, 'M', 'ii'),
- (0x2172, 'M', 'iii'),
- (0x2173, 'M', 'iv'),
- (0x2174, 'M', 'v'),
- (0x2175, 'M', 'vi'),
- (0x2176, 'M', 'vii'),
- (0x2177, 'M', 'viii'),
- (0x2178, 'M', 'ix'),
- (0x2179, 'M', 'x'),
- (0x217A, 'M', 'xi'),
- (0x217B, 'M', 'xii'),
- (0x217C, 'M', 'l'),
- ]
-
-def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x217D, 'M', 'c'),
- (0x217E, 'M', 'd'),
- (0x217F, 'M', 'm'),
+ (0x2150, 'M', u'1⁄7'),
+ (0x2151, 'M', u'1⁄9'),
+ (0x2152, 'M', u'1⁄10'),
+ (0x2153, 'M', u'1⁄3'),
+ (0x2154, 'M', u'2⁄3'),
+ (0x2155, 'M', u'1⁄5'),
+ (0x2156, 'M', u'2⁄5'),
+ (0x2157, 'M', u'3⁄5'),
+ (0x2158, 'M', u'4⁄5'),
+ (0x2159, 'M', u'1⁄6'),
+ (0x215A, 'M', u'5⁄6'),
+ (0x215B, 'M', u'1⁄8'),
+ (0x215C, 'M', u'3⁄8'),
+ (0x215D, 'M', u'5⁄8'),
+ (0x215E, 'M', u'7⁄8'),
+ (0x215F, 'M', u'1⁄'),
+ (0x2160, 'M', u'i'),
+ (0x2161, 'M', u'ii'),
+ (0x2162, 'M', u'iii'),
+ (0x2163, 'M', u'iv'),
+ (0x2164, 'M', u'v'),
+ (0x2165, 'M', u'vi'),
+ (0x2166, 'M', u'vii'),
+ (0x2167, 'M', u'viii'),
+ (0x2168, 'M', u'ix'),
+ (0x2169, 'M', u'x'),
+ (0x216A, 'M', u'xi'),
+ (0x216B, 'M', u'xii'),
+ (0x216C, 'M', u'l'),
+ (0x216D, 'M', u'c'),
+ (0x216E, 'M', u'd'),
+ (0x216F, 'M', u'm'),
+ (0x2170, 'M', u'i'),
+ (0x2171, 'M', u'ii'),
+ (0x2172, 'M', u'iii'),
+ (0x2173, 'M', u'iv'),
+ (0x2174, 'M', u'v'),
+ (0x2175, 'M', u'vi'),
+ (0x2176, 'M', u'vii'),
+ (0x2177, 'M', u'viii'),
+ (0x2178, 'M', u'ix'),
+ (0x2179, 'M', u'x'),
+ (0x217A, 'M', u'xi'),
+ (0x217B, 'M', u'xii'),
+ (0x217C, 'M', u'l'),
+ (0x217D, 'M', u'c'),
+ (0x217E, 'M', u'd'),
+ (0x217F, 'M', u'm'),
(0x2180, 'V'),
(0x2183, 'X'),
(0x2184, 'V'),
- (0x2189, 'M', '0⁄3'),
+ (0x2189, 'M', u'0⁄3'),
(0x218A, 'V'),
(0x218C, 'X'),
(0x2190, 'V'),
- (0x222C, 'M', '∫∫'),
- (0x222D, 'M', '∫∫∫'),
+ (0x222C, 'M', u'∫∫'),
+ (0x222D, 'M', u'∫∫∫'),
(0x222E, 'V'),
- (0x222F, 'M', '∮∮'),
- (0x2230, 'M', '∮∮∮'),
+ (0x222F, 'M', u'∮∮'),
+ (0x2230, 'M', u'∮∮∮'),
(0x2231, 'V'),
(0x2260, '3'),
(0x2261, 'V'),
(0x226E, '3'),
(0x2270, 'V'),
- (0x2329, 'M', '〈'),
- (0x232A, 'M', '〉'),
+ (0x2329, 'M', u'〈'),
+ (0x232A, 'M', u'〉'),
(0x232B, 'V'),
(0x2427, 'X'),
(0x2440, 'V'),
(0x244B, 'X'),
- (0x2460, 'M', '1'),
- (0x2461, 'M', '2'),
- (0x2462, 'M', '3'),
- (0x2463, 'M', '4'),
- (0x2464, 'M', '5'),
- (0x2465, 'M', '6'),
- (0x2466, 'M', '7'),
- (0x2467, 'M', '8'),
- (0x2468, 'M', '9'),
- (0x2469, 'M', '10'),
- (0x246A, 'M', '11'),
- (0x246B, 'M', '12'),
- (0x246C, 'M', '13'),
- (0x246D, 'M', '14'),
- (0x246E, 'M', '15'),
- (0x246F, 'M', '16'),
- (0x2470, 'M', '17'),
- (0x2471, 'M', '18'),
- (0x2472, 'M', '19'),
- (0x2473, 'M', '20'),
- (0x2474, '3', '(1)'),
- (0x2475, '3', '(2)'),
- (0x2476, '3', '(3)'),
- (0x2477, '3', '(4)'),
- (0x2478, '3', '(5)'),
- (0x2479, '3', '(6)'),
- (0x247A, '3', '(7)'),
- (0x247B, '3', '(8)'),
- (0x247C, '3', '(9)'),
- (0x247D, '3', '(10)'),
- (0x247E, '3', '(11)'),
- (0x247F, '3', '(12)'),
- (0x2480, '3', '(13)'),
- (0x2481, '3', '(14)'),
- (0x2482, '3', '(15)'),
- (0x2483, '3', '(16)'),
- (0x2484, '3', '(17)'),
- (0x2485, '3', '(18)'),
- (0x2486, '3', '(19)'),
- (0x2487, '3', '(20)'),
+ (0x2460, 'M', u'1'),
+ (0x2461, 'M', u'2'),
+ ]
+
+def _seg_23():
+ return [
+ (0x2462, 'M', u'3'),
+ (0x2463, 'M', u'4'),
+ (0x2464, 'M', u'5'),
+ (0x2465, 'M', u'6'),
+ (0x2466, 'M', u'7'),
+ (0x2467, 'M', u'8'),
+ (0x2468, 'M', u'9'),
+ (0x2469, 'M', u'10'),
+ (0x246A, 'M', u'11'),
+ (0x246B, 'M', u'12'),
+ (0x246C, 'M', u'13'),
+ (0x246D, 'M', u'14'),
+ (0x246E, 'M', u'15'),
+ (0x246F, 'M', u'16'),
+ (0x2470, 'M', u'17'),
+ (0x2471, 'M', u'18'),
+ (0x2472, 'M', u'19'),
+ (0x2473, 'M', u'20'),
+ (0x2474, '3', u'(1)'),
+ (0x2475, '3', u'(2)'),
+ (0x2476, '3', u'(3)'),
+ (0x2477, '3', u'(4)'),
+ (0x2478, '3', u'(5)'),
+ (0x2479, '3', u'(6)'),
+ (0x247A, '3', u'(7)'),
+ (0x247B, '3', u'(8)'),
+ (0x247C, '3', u'(9)'),
+ (0x247D, '3', u'(10)'),
+ (0x247E, '3', u'(11)'),
+ (0x247F, '3', u'(12)'),
+ (0x2480, '3', u'(13)'),
+ (0x2481, '3', u'(14)'),
+ (0x2482, '3', u'(15)'),
+ (0x2483, '3', u'(16)'),
+ (0x2484, '3', u'(17)'),
+ (0x2485, '3', u'(18)'),
+ (0x2486, '3', u'(19)'),
+ (0x2487, '3', u'(20)'),
(0x2488, 'X'),
- (0x249C, '3', '(a)'),
- (0x249D, '3', '(b)'),
- (0x249E, '3', '(c)'),
- (0x249F, '3', '(d)'),
- (0x24A0, '3', '(e)'),
- (0x24A1, '3', '(f)'),
- (0x24A2, '3', '(g)'),
- (0x24A3, '3', '(h)'),
- (0x24A4, '3', '(i)'),
- (0x24A5, '3', '(j)'),
- (0x24A6, '3', '(k)'),
- (0x24A7, '3', '(l)'),
- (0x24A8, '3', '(m)'),
- (0x24A9, '3', '(n)'),
- (0x24AA, '3', '(o)'),
- (0x24AB, '3', '(p)'),
- (0x24AC, '3', '(q)'),
- (0x24AD, '3', '(r)'),
- (0x24AE, '3', '(s)'),
- (0x24AF, '3', '(t)'),
- (0x24B0, '3', '(u)'),
- (0x24B1, '3', '(v)'),
- (0x24B2, '3', '(w)'),
- (0x24B3, '3', '(x)'),
- (0x24B4, '3', '(y)'),
- (0x24B5, '3', '(z)'),
- (0x24B6, 'M', 'a'),
- (0x24B7, 'M', 'b'),
- (0x24B8, 'M', 'c'),
- (0x24B9, 'M', 'd'),
- (0x24BA, 'M', 'e'),
- (0x24BB, 'M', 'f'),
- (0x24BC, 'M', 'g'),
+ (0x249C, '3', u'(a)'),
+ (0x249D, '3', u'(b)'),
+ (0x249E, '3', u'(c)'),
+ (0x249F, '3', u'(d)'),
+ (0x24A0, '3', u'(e)'),
+ (0x24A1, '3', u'(f)'),
+ (0x24A2, '3', u'(g)'),
+ (0x24A3, '3', u'(h)'),
+ (0x24A4, '3', u'(i)'),
+ (0x24A5, '3', u'(j)'),
+ (0x24A6, '3', u'(k)'),
+ (0x24A7, '3', u'(l)'),
+ (0x24A8, '3', u'(m)'),
+ (0x24A9, '3', u'(n)'),
+ (0x24AA, '3', u'(o)'),
+ (0x24AB, '3', u'(p)'),
+ (0x24AC, '3', u'(q)'),
+ (0x24AD, '3', u'(r)'),
+ (0x24AE, '3', u'(s)'),
+ (0x24AF, '3', u'(t)'),
+ (0x24B0, '3', u'(u)'),
+ (0x24B1, '3', u'(v)'),
+ (0x24B2, '3', u'(w)'),
+ (0x24B3, '3', u'(x)'),
+ (0x24B4, '3', u'(y)'),
+ (0x24B5, '3', u'(z)'),
+ (0x24B6, 'M', u'a'),
+ (0x24B7, 'M', u'b'),
+ (0x24B8, 'M', u'c'),
+ (0x24B9, 'M', u'd'),
+ (0x24BA, 'M', u'e'),
+ (0x24BB, 'M', u'f'),
+ (0x24BC, 'M', u'g'),
+ (0x24BD, 'M', u'h'),
+ (0x24BE, 'M', u'i'),
+ (0x24BF, 'M', u'j'),
+ (0x24C0, 'M', u'k'),
+ (0x24C1, 'M', u'l'),
+ (0x24C2, 'M', u'm'),
+ (0x24C3, 'M', u'n'),
+ (0x24C4, 'M', u'o'),
+ (0x24C5, 'M', u'p'),
+ (0x24C6, 'M', u'q'),
+ (0x24C7, 'M', u'r'),
+ (0x24C8, 'M', u's'),
+ (0x24C9, 'M', u't'),
+ (0x24CA, 'M', u'u'),
+ (0x24CB, 'M', u'v'),
+ (0x24CC, 'M', u'w'),
+ (0x24CD, 'M', u'x'),
+ (0x24CE, 'M', u'y'),
+ (0x24CF, 'M', u'z'),
+ (0x24D0, 'M', u'a'),
+ (0x24D1, 'M', u'b'),
+ (0x24D2, 'M', u'c'),
+ (0x24D3, 'M', u'd'),
+ (0x24D4, 'M', u'e'),
+ (0x24D5, 'M', u'f'),
+ (0x24D6, 'M', u'g'),
+ (0x24D7, 'M', u'h'),
+ (0x24D8, 'M', u'i'),
]
-def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_24():
return [
- (0x24BD, 'M', 'h'),
- (0x24BE, 'M', 'i'),
- (0x24BF, 'M', 'j'),
- (0x24C0, 'M', 'k'),
- (0x24C1, 'M', 'l'),
- (0x24C2, 'M', 'm'),
- (0x24C3, 'M', 'n'),
- (0x24C4, 'M', 'o'),
- (0x24C5, 'M', 'p'),
- (0x24C6, 'M', 'q'),
- (0x24C7, 'M', 'r'),
- (0x24C8, 'M', 's'),
- (0x24C9, 'M', 't'),
- (0x24CA, 'M', 'u'),
- (0x24CB, 'M', 'v'),
- (0x24CC, 'M', 'w'),
- (0x24CD, 'M', 'x'),
- (0x24CE, 'M', 'y'),
- (0x24CF, 'M', 'z'),
- (0x24D0, 'M', 'a'),
- (0x24D1, 'M', 'b'),
- (0x24D2, 'M', 'c'),
- (0x24D3, 'M', 'd'),
- (0x24D4, 'M', 'e'),
- (0x24D5, 'M', 'f'),
- (0x24D6, 'M', 'g'),
- (0x24D7, 'M', 'h'),
- (0x24D8, 'M', 'i'),
- (0x24D9, 'M', 'j'),
- (0x24DA, 'M', 'k'),
- (0x24DB, 'M', 'l'),
- (0x24DC, 'M', 'm'),
- (0x24DD, 'M', 'n'),
- (0x24DE, 'M', 'o'),
- (0x24DF, 'M', 'p'),
- (0x24E0, 'M', 'q'),
- (0x24E1, 'M', 'r'),
- (0x24E2, 'M', 's'),
- (0x24E3, 'M', 't'),
- (0x24E4, 'M', 'u'),
- (0x24E5, 'M', 'v'),
- (0x24E6, 'M', 'w'),
- (0x24E7, 'M', 'x'),
- (0x24E8, 'M', 'y'),
- (0x24E9, 'M', 'z'),
- (0x24EA, 'M', '0'),
+ (0x24D9, 'M', u'j'),
+ (0x24DA, 'M', u'k'),
+ (0x24DB, 'M', u'l'),
+ (0x24DC, 'M', u'm'),
+ (0x24DD, 'M', u'n'),
+ (0x24DE, 'M', u'o'),
+ (0x24DF, 'M', u'p'),
+ (0x24E0, 'M', u'q'),
+ (0x24E1, 'M', u'r'),
+ (0x24E2, 'M', u's'),
+ (0x24E3, 'M', u't'),
+ (0x24E4, 'M', u'u'),
+ (0x24E5, 'M', u'v'),
+ (0x24E6, 'M', u'w'),
+ (0x24E7, 'M', u'x'),
+ (0x24E8, 'M', u'y'),
+ (0x24E9, 'M', u'z'),
+ (0x24EA, 'M', u'0'),
(0x24EB, 'V'),
- (0x2A0C, 'M', '∫∫∫∫'),
+ (0x2A0C, 'M', u'∫∫∫∫'),
(0x2A0D, 'V'),
- (0x2A74, '3', '::='),
- (0x2A75, '3', '=='),
- (0x2A76, '3', '==='),
+ (0x2A74, '3', u'::='),
+ (0x2A75, '3', u'=='),
+ (0x2A76, '3', u'==='),
(0x2A77, 'V'),
- (0x2ADC, 'M', '⫝̸'),
+ (0x2ADC, 'M', u'⫝̸'),
(0x2ADD, 'V'),
(0x2B74, 'X'),
(0x2B76, 'V'),
(0x2B96, 'X'),
- (0x2B97, 'V'),
- (0x2C00, 'M', 'ⰰ'),
- (0x2C01, 'M', 'ⰱ'),
- (0x2C02, 'M', 'ⰲ'),
- (0x2C03, 'M', 'ⰳ'),
- (0x2C04, 'M', 'ⰴ'),
- (0x2C05, 'M', 'ⰵ'),
- (0x2C06, 'M', 'ⰶ'),
- (0x2C07, 'M', 'ⰷ'),
- (0x2C08, 'M', 'ⰸ'),
- (0x2C09, 'M', 'ⰹ'),
- (0x2C0A, 'M', 'ⰺ'),
- (0x2C0B, 'M', 'ⰻ'),
- (0x2C0C, 'M', 'ⰼ'),
- (0x2C0D, 'M', 'ⰽ'),
- (0x2C0E, 'M', 'ⰾ'),
- (0x2C0F, 'M', 'ⰿ'),
- (0x2C10, 'M', 'ⱀ'),
- (0x2C11, 'M', 'ⱁ'),
- (0x2C12, 'M', 'ⱂ'),
- (0x2C13, 'M', 'ⱃ'),
- (0x2C14, 'M', 'ⱄ'),
- (0x2C15, 'M', 'ⱅ'),
- (0x2C16, 'M', 'ⱆ'),
- (0x2C17, 'M', 'ⱇ'),
- (0x2C18, 'M', 'ⱈ'),
- (0x2C19, 'M', 'ⱉ'),
- (0x2C1A, 'M', 'ⱊ'),
- (0x2C1B, 'M', 'ⱋ'),
- (0x2C1C, 'M', 'ⱌ'),
- (0x2C1D, 'M', 'ⱍ'),
- (0x2C1E, 'M', 'ⱎ'),
- (0x2C1F, 'M', 'ⱏ'),
- (0x2C20, 'M', 'ⱐ'),
- (0x2C21, 'M', 'ⱑ'),
- (0x2C22, 'M', 'ⱒ'),
- (0x2C23, 'M', 'ⱓ'),
- (0x2C24, 'M', 'ⱔ'),
- (0x2C25, 'M', 'ⱕ'),
- (0x2C26, 'M', 'ⱖ'),
- (0x2C27, 'M', 'ⱗ'),
- (0x2C28, 'M', 'ⱘ'),
- ]
-
-def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x2C29, 'M', 'ⱙ'),
- (0x2C2A, 'M', 'ⱚ'),
- (0x2C2B, 'M', 'ⱛ'),
- (0x2C2C, 'M', 'ⱜ'),
- (0x2C2D, 'M', 'ⱝ'),
- (0x2C2E, 'M', 'ⱞ'),
- (0x2C2F, 'M', 'ⱟ'),
+ (0x2B98, 'V'),
+ (0x2BC9, 'X'),
+ (0x2BCA, 'V'),
+ (0x2BFF, 'X'),
+ (0x2C00, 'M', u'ⰰ'),
+ (0x2C01, 'M', u'ⰱ'),
+ (0x2C02, 'M', u'ⰲ'),
+ (0x2C03, 'M', u'ⰳ'),
+ (0x2C04, 'M', u'ⰴ'),
+ (0x2C05, 'M', u'ⰵ'),
+ (0x2C06, 'M', u'ⰶ'),
+ (0x2C07, 'M', u'ⰷ'),
+ (0x2C08, 'M', u'ⰸ'),
+ (0x2C09, 'M', u'ⰹ'),
+ (0x2C0A, 'M', u'ⰺ'),
+ (0x2C0B, 'M', u'ⰻ'),
+ (0x2C0C, 'M', u'ⰼ'),
+ (0x2C0D, 'M', u'ⰽ'),
+ (0x2C0E, 'M', u'ⰾ'),
+ (0x2C0F, 'M', u'ⰿ'),
+ (0x2C10, 'M', u'ⱀ'),
+ (0x2C11, 'M', u'ⱁ'),
+ (0x2C12, 'M', u'ⱂ'),
+ (0x2C13, 'M', u'ⱃ'),
+ (0x2C14, 'M', u'ⱄ'),
+ (0x2C15, 'M', u'ⱅ'),
+ (0x2C16, 'M', u'ⱆ'),
+ (0x2C17, 'M', u'ⱇ'),
+ (0x2C18, 'M', u'ⱈ'),
+ (0x2C19, 'M', u'ⱉ'),
+ (0x2C1A, 'M', u'ⱊ'),
+ (0x2C1B, 'M', u'ⱋ'),
+ (0x2C1C, 'M', u'ⱌ'),
+ (0x2C1D, 'M', u'ⱍ'),
+ (0x2C1E, 'M', u'ⱎ'),
+ (0x2C1F, 'M', u'ⱏ'),
+ (0x2C20, 'M', u'ⱐ'),
+ (0x2C21, 'M', u'ⱑ'),
+ (0x2C22, 'M', u'ⱒ'),
+ (0x2C23, 'M', u'ⱓ'),
+ (0x2C24, 'M', u'ⱔ'),
+ (0x2C25, 'M', u'ⱕ'),
+ (0x2C26, 'M', u'ⱖ'),
+ (0x2C27, 'M', u'ⱗ'),
+ (0x2C28, 'M', u'ⱘ'),
+ (0x2C29, 'M', u'ⱙ'),
+ (0x2C2A, 'M', u'ⱚ'),
+ (0x2C2B, 'M', u'ⱛ'),
+ (0x2C2C, 'M', u'ⱜ'),
+ (0x2C2D, 'M', u'ⱝ'),
+ (0x2C2E, 'M', u'ⱞ'),
+ (0x2C2F, 'X'),
(0x2C30, 'V'),
- (0x2C60, 'M', 'ⱡ'),
+ (0x2C5F, 'X'),
+ (0x2C60, 'M', u'ⱡ'),
(0x2C61, 'V'),
- (0x2C62, 'M', 'ɫ'),
- (0x2C63, 'M', 'ᵽ'),
- (0x2C64, 'M', 'ɽ'),
+ (0x2C62, 'M', u'ɫ'),
+ (0x2C63, 'M', u'ᵽ'),
+ (0x2C64, 'M', u'ɽ'),
(0x2C65, 'V'),
- (0x2C67, 'M', 'ⱨ'),
+ (0x2C67, 'M', u'ⱨ'),
(0x2C68, 'V'),
- (0x2C69, 'M', 'ⱪ'),
+ (0x2C69, 'M', u'ⱪ'),
(0x2C6A, 'V'),
- (0x2C6B, 'M', 'ⱬ'),
+ (0x2C6B, 'M', u'ⱬ'),
(0x2C6C, 'V'),
- (0x2C6D, 'M', 'ɑ'),
- (0x2C6E, 'M', 'ɱ'),
- (0x2C6F, 'M', 'ɐ'),
- (0x2C70, 'M', 'ɒ'),
+ (0x2C6D, 'M', u'ɑ'),
+ (0x2C6E, 'M', u'ɱ'),
+ (0x2C6F, 'M', u'ɐ'),
+ (0x2C70, 'M', u'ɒ'),
+ ]
+
+def _seg_25():
+ return [
(0x2C71, 'V'),
- (0x2C72, 'M', 'ⱳ'),
+ (0x2C72, 'M', u'ⱳ'),
(0x2C73, 'V'),
- (0x2C75, 'M', 'ⱶ'),
+ (0x2C75, 'M', u'ⱶ'),
(0x2C76, 'V'),
- (0x2C7C, 'M', 'j'),
- (0x2C7D, 'M', 'v'),
- (0x2C7E, 'M', 'ȿ'),
- (0x2C7F, 'M', 'ɀ'),
- (0x2C80, 'M', 'ⲁ'),
+ (0x2C7C, 'M', u'j'),
+ (0x2C7D, 'M', u'v'),
+ (0x2C7E, 'M', u'ȿ'),
+ (0x2C7F, 'M', u'ɀ'),
+ (0x2C80, 'M', u'ⲁ'),
(0x2C81, 'V'),
- (0x2C82, 'M', 'ⲃ'),
+ (0x2C82, 'M', u'ⲃ'),
(0x2C83, 'V'),
- (0x2C84, 'M', 'ⲅ'),
+ (0x2C84, 'M', u'ⲅ'),
(0x2C85, 'V'),
- (0x2C86, 'M', 'ⲇ'),
+ (0x2C86, 'M', u'ⲇ'),
(0x2C87, 'V'),
- (0x2C88, 'M', 'ⲉ'),
+ (0x2C88, 'M', u'ⲉ'),
(0x2C89, 'V'),
- (0x2C8A, 'M', 'ⲋ'),
+ (0x2C8A, 'M', u'ⲋ'),
(0x2C8B, 'V'),
- (0x2C8C, 'M', 'ⲍ'),
+ (0x2C8C, 'M', u'ⲍ'),
(0x2C8D, 'V'),
- (0x2C8E, 'M', 'ⲏ'),
+ (0x2C8E, 'M', u'ⲏ'),
(0x2C8F, 'V'),
- (0x2C90, 'M', 'ⲑ'),
+ (0x2C90, 'M', u'ⲑ'),
(0x2C91, 'V'),
- (0x2C92, 'M', 'ⲓ'),
+ (0x2C92, 'M', u'ⲓ'),
(0x2C93, 'V'),
- (0x2C94, 'M', 'ⲕ'),
+ (0x2C94, 'M', u'ⲕ'),
(0x2C95, 'V'),
- (0x2C96, 'M', 'ⲗ'),
+ (0x2C96, 'M', u'ⲗ'),
(0x2C97, 'V'),
- (0x2C98, 'M', 'ⲙ'),
+ (0x2C98, 'M', u'ⲙ'),
(0x2C99, 'V'),
- (0x2C9A, 'M', 'ⲛ'),
+ (0x2C9A, 'M', u'ⲛ'),
(0x2C9B, 'V'),
- (0x2C9C, 'M', 'ⲝ'),
+ (0x2C9C, 'M', u'ⲝ'),
(0x2C9D, 'V'),
- (0x2C9E, 'M', 'ⲟ'),
+ (0x2C9E, 'M', u'ⲟ'),
(0x2C9F, 'V'),
- (0x2CA0, 'M', 'ⲡ'),
+ (0x2CA0, 'M', u'ⲡ'),
(0x2CA1, 'V'),
- (0x2CA2, 'M', 'ⲣ'),
+ (0x2CA2, 'M', u'ⲣ'),
(0x2CA3, 'V'),
- (0x2CA4, 'M', 'ⲥ'),
+ (0x2CA4, 'M', u'ⲥ'),
(0x2CA5, 'V'),
- (0x2CA6, 'M', 'ⲧ'),
+ (0x2CA6, 'M', u'ⲧ'),
(0x2CA7, 'V'),
- (0x2CA8, 'M', 'ⲩ'),
+ (0x2CA8, 'M', u'ⲩ'),
(0x2CA9, 'V'),
- (0x2CAA, 'M', 'ⲫ'),
+ (0x2CAA, 'M', u'ⲫ'),
(0x2CAB, 'V'),
- (0x2CAC, 'M', 'ⲭ'),
+ (0x2CAC, 'M', u'ⲭ'),
(0x2CAD, 'V'),
- (0x2CAE, 'M', 'ⲯ'),
+ (0x2CAE, 'M', u'ⲯ'),
(0x2CAF, 'V'),
- (0x2CB0, 'M', 'ⲱ'),
+ (0x2CB0, 'M', u'ⲱ'),
(0x2CB1, 'V'),
- (0x2CB2, 'M', 'ⲳ'),
+ (0x2CB2, 'M', u'ⲳ'),
(0x2CB3, 'V'),
- (0x2CB4, 'M', 'ⲵ'),
+ (0x2CB4, 'M', u'ⲵ'),
(0x2CB5, 'V'),
- (0x2CB6, 'M', 'ⲷ'),
+ (0x2CB6, 'M', u'ⲷ'),
(0x2CB7, 'V'),
- (0x2CB8, 'M', 'ⲹ'),
+ (0x2CB8, 'M', u'ⲹ'),
(0x2CB9, 'V'),
- (0x2CBA, 'M', 'ⲻ'),
+ (0x2CBA, 'M', u'ⲻ'),
(0x2CBB, 'V'),
- (0x2CBC, 'M', 'ⲽ'),
+ (0x2CBC, 'M', u'ⲽ'),
(0x2CBD, 'V'),
- (0x2CBE, 'M', 'ⲿ'),
+ (0x2CBE, 'M', u'ⲿ'),
(0x2CBF, 'V'),
- (0x2CC0, 'M', 'ⳁ'),
+ (0x2CC0, 'M', u'ⳁ'),
(0x2CC1, 'V'),
- (0x2CC2, 'M', 'ⳃ'),
- ]
-
-def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
+ (0x2CC2, 'M', u'ⳃ'),
(0x2CC3, 'V'),
- (0x2CC4, 'M', 'ⳅ'),
+ (0x2CC4, 'M', u'ⳅ'),
(0x2CC5, 'V'),
- (0x2CC6, 'M', 'ⳇ'),
+ (0x2CC6, 'M', u'ⳇ'),
(0x2CC7, 'V'),
- (0x2CC8, 'M', 'ⳉ'),
+ (0x2CC8, 'M', u'ⳉ'),
(0x2CC9, 'V'),
- (0x2CCA, 'M', 'ⳋ'),
+ (0x2CCA, 'M', u'ⳋ'),
(0x2CCB, 'V'),
- (0x2CCC, 'M', 'ⳍ'),
+ (0x2CCC, 'M', u'ⳍ'),
(0x2CCD, 'V'),
- (0x2CCE, 'M', 'ⳏ'),
+ (0x2CCE, 'M', u'ⳏ'),
(0x2CCF, 'V'),
- (0x2CD0, 'M', 'ⳑ'),
+ (0x2CD0, 'M', u'ⳑ'),
(0x2CD1, 'V'),
- (0x2CD2, 'M', 'ⳓ'),
+ (0x2CD2, 'M', u'ⳓ'),
(0x2CD3, 'V'),
- (0x2CD4, 'M', 'ⳕ'),
+ (0x2CD4, 'M', u'ⳕ'),
(0x2CD5, 'V'),
- (0x2CD6, 'M', 'ⳗ'),
+ (0x2CD6, 'M', u'ⳗ'),
(0x2CD7, 'V'),
- (0x2CD8, 'M', 'ⳙ'),
+ (0x2CD8, 'M', u'ⳙ'),
(0x2CD9, 'V'),
- (0x2CDA, 'M', 'ⳛ'),
+ (0x2CDA, 'M', u'ⳛ'),
+ ]
+
+def _seg_26():
+ return [
(0x2CDB, 'V'),
- (0x2CDC, 'M', 'ⳝ'),
+ (0x2CDC, 'M', u'ⳝ'),
(0x2CDD, 'V'),
- (0x2CDE, 'M', 'ⳟ'),
+ (0x2CDE, 'M', u'ⳟ'),
(0x2CDF, 'V'),
- (0x2CE0, 'M', 'ⳡ'),
+ (0x2CE0, 'M', u'ⳡ'),
(0x2CE1, 'V'),
- (0x2CE2, 'M', 'ⳣ'),
+ (0x2CE2, 'M', u'ⳣ'),
(0x2CE3, 'V'),
- (0x2CEB, 'M', 'ⳬ'),
+ (0x2CEB, 'M', u'ⳬ'),
(0x2CEC, 'V'),
- (0x2CED, 'M', 'ⳮ'),
+ (0x2CED, 'M', u'ⳮ'),
(0x2CEE, 'V'),
- (0x2CF2, 'M', 'ⳳ'),
+ (0x2CF2, 'M', u'ⳳ'),
(0x2CF3, 'V'),
(0x2CF4, 'X'),
(0x2CF9, 'V'),
@@ -2762,7 +2735,7 @@ def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x2D2E, 'X'),
(0x2D30, 'V'),
(0x2D68, 'X'),
- (0x2D6F, 'M', 'ⵡ'),
+ (0x2D6F, 'M', u'ⵡ'),
(0x2D70, 'V'),
(0x2D71, 'X'),
(0x2D7F, 'V'),
@@ -2784,1184 +2757,1154 @@ def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x2DD8, 'V'),
(0x2DDF, 'X'),
(0x2DE0, 'V'),
- (0x2E5E, 'X'),
+ (0x2E4F, 'X'),
(0x2E80, 'V'),
(0x2E9A, 'X'),
(0x2E9B, 'V'),
- (0x2E9F, 'M', '母'),
+ (0x2E9F, 'M', u'母'),
(0x2EA0, 'V'),
- (0x2EF3, 'M', '龟'),
+ (0x2EF3, 'M', u'龟'),
(0x2EF4, 'X'),
- (0x2F00, 'M', '一'),
- (0x2F01, 'M', '丨'),
- (0x2F02, 'M', '丶'),
- (0x2F03, 'M', '丿'),
- (0x2F04, 'M', '乙'),
- (0x2F05, 'M', '亅'),
- (0x2F06, 'M', '二'),
- (0x2F07, 'M', '亠'),
- (0x2F08, 'M', '人'),
- (0x2F09, 'M', '儿'),
- (0x2F0A, 'M', '入'),
- (0x2F0B, 'M', '八'),
- (0x2F0C, 'M', '冂'),
- (0x2F0D, 'M', '冖'),
- (0x2F0E, 'M', '冫'),
- (0x2F0F, 'M', '几'),
- (0x2F10, 'M', '凵'),
- (0x2F11, 'M', '刀'),
- (0x2F12, 'M', '力'),
- (0x2F13, 'M', '勹'),
- (0x2F14, 'M', '匕'),
- (0x2F15, 'M', '匚'),
+ (0x2F00, 'M', u'一'),
+ (0x2F01, 'M', u'丨'),
+ (0x2F02, 'M', u'丶'),
+ (0x2F03, 'M', u'丿'),
+ (0x2F04, 'M', u'乙'),
+ (0x2F05, 'M', u'亅'),
+ (0x2F06, 'M', u'二'),
+ (0x2F07, 'M', u'亠'),
+ (0x2F08, 'M', u'人'),
+ (0x2F09, 'M', u'儿'),
+ (0x2F0A, 'M', u'入'),
+ (0x2F0B, 'M', u'八'),
+ (0x2F0C, 'M', u'冂'),
+ (0x2F0D, 'M', u'冖'),
+ (0x2F0E, 'M', u'冫'),
+ (0x2F0F, 'M', u'几'),
+ (0x2F10, 'M', u'凵'),
+ (0x2F11, 'M', u'刀'),
+ (0x2F12, 'M', u'力'),
+ (0x2F13, 'M', u'勹'),
+ (0x2F14, 'M', u'匕'),
+ (0x2F15, 'M', u'匚'),
+ (0x2F16, 'M', u'匸'),
+ (0x2F17, 'M', u'十'),
+ (0x2F18, 'M', u'卜'),
+ (0x2F19, 'M', u'卩'),
+ (0x2F1A, 'M', u'厂'),
+ (0x2F1B, 'M', u'厶'),
+ (0x2F1C, 'M', u'又'),
+ (0x2F1D, 'M', u'口'),
+ (0x2F1E, 'M', u'囗'),
+ (0x2F1F, 'M', u'土'),
+ (0x2F20, 'M', u'士'),
+ (0x2F21, 'M', u'夂'),
+ (0x2F22, 'M', u'夊'),
+ (0x2F23, 'M', u'夕'),
+ (0x2F24, 'M', u'大'),
+ (0x2F25, 'M', u'女'),
+ (0x2F26, 'M', u'子'),
+ (0x2F27, 'M', u'宀'),
+ (0x2F28, 'M', u'寸'),
+ (0x2F29, 'M', u'小'),
+ (0x2F2A, 'M', u'尢'),
+ (0x2F2B, 'M', u'尸'),
+ (0x2F2C, 'M', u'屮'),
+ (0x2F2D, 'M', u'山'),
]
-def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_27():
return [
- (0x2F16, 'M', '匸'),
- (0x2F17, 'M', '十'),
- (0x2F18, 'M', '卜'),
- (0x2F19, 'M', '卩'),
- (0x2F1A, 'M', '厂'),
- (0x2F1B, 'M', '厶'),
- (0x2F1C, 'M', '又'),
- (0x2F1D, 'M', '口'),
- (0x2F1E, 'M', '囗'),
- (0x2F1F, 'M', '土'),
- (0x2F20, 'M', '士'),
- (0x2F21, 'M', '夂'),
- (0x2F22, 'M', '夊'),
- (0x2F23, 'M', '夕'),
- (0x2F24, 'M', '大'),
- (0x2F25, 'M', '女'),
- (0x2F26, 'M', '子'),
- (0x2F27, 'M', '宀'),
- (0x2F28, 'M', '寸'),
- (0x2F29, 'M', '小'),
- (0x2F2A, 'M', '尢'),
- (0x2F2B, 'M', '尸'),
- (0x2F2C, 'M', '屮'),
- (0x2F2D, 'M', '山'),
- (0x2F2E, 'M', '巛'),
- (0x2F2F, 'M', '工'),
- (0x2F30, 'M', '己'),
- (0x2F31, 'M', '巾'),
- (0x2F32, 'M', '干'),
- (0x2F33, 'M', '幺'),
- (0x2F34, 'M', '广'),
- (0x2F35, 'M', '廴'),
- (0x2F36, 'M', '廾'),
- (0x2F37, 'M', '弋'),
- (0x2F38, 'M', '弓'),
- (0x2F39, 'M', '彐'),
- (0x2F3A, 'M', '彡'),
- (0x2F3B, 'M', '彳'),
- (0x2F3C, 'M', '心'),
- (0x2F3D, 'M', '戈'),
- (0x2F3E, 'M', '戶'),
- (0x2F3F, 'M', '手'),
- (0x2F40, 'M', '支'),
- (0x2F41, 'M', '攴'),
- (0x2F42, 'M', '文'),
- (0x2F43, 'M', '斗'),
- (0x2F44, 'M', '斤'),
- (0x2F45, 'M', '方'),
- (0x2F46, 'M', '无'),
- (0x2F47, 'M', '日'),
- (0x2F48, 'M', '曰'),
- (0x2F49, 'M', '月'),
- (0x2F4A, 'M', '木'),
- (0x2F4B, 'M', '欠'),
- (0x2F4C, 'M', '止'),
- (0x2F4D, 'M', '歹'),
- (0x2F4E, 'M', '殳'),
- (0x2F4F, 'M', '毋'),
- (0x2F50, 'M', '比'),
- (0x2F51, 'M', '毛'),
- (0x2F52, 'M', '氏'),
- (0x2F53, 'M', '气'),
- (0x2F54, 'M', '水'),
- (0x2F55, 'M', '火'),
- (0x2F56, 'M', '爪'),
- (0x2F57, 'M', '父'),
- (0x2F58, 'M', '爻'),
- (0x2F59, 'M', '爿'),
- (0x2F5A, 'M', '片'),
- (0x2F5B, 'M', '牙'),
- (0x2F5C, 'M', '牛'),
- (0x2F5D, 'M', '犬'),
- (0x2F5E, 'M', '玄'),
- (0x2F5F, 'M', '玉'),
- (0x2F60, 'M', '瓜'),
- (0x2F61, 'M', '瓦'),
- (0x2F62, 'M', '甘'),
- (0x2F63, 'M', '生'),
- (0x2F64, 'M', '用'),
- (0x2F65, 'M', '田'),
- (0x2F66, 'M', '疋'),
- (0x2F67, 'M', '疒'),
- (0x2F68, 'M', '癶'),
- (0x2F69, 'M', '白'),
- (0x2F6A, 'M', '皮'),
- (0x2F6B, 'M', '皿'),
- (0x2F6C, 'M', '目'),
- (0x2F6D, 'M', '矛'),
- (0x2F6E, 'M', '矢'),
- (0x2F6F, 'M', '石'),
- (0x2F70, 'M', '示'),
- (0x2F71, 'M', '禸'),
- (0x2F72, 'M', '禾'),
- (0x2F73, 'M', '穴'),
- (0x2F74, 'M', '立'),
- (0x2F75, 'M', '竹'),
- (0x2F76, 'M', '米'),
- (0x2F77, 'M', '糸'),
- (0x2F78, 'M', '缶'),
- (0x2F79, 'M', '网'),
+ (0x2F2E, 'M', u'巛'),
+ (0x2F2F, 'M', u'工'),
+ (0x2F30, 'M', u'己'),
+ (0x2F31, 'M', u'巾'),
+ (0x2F32, 'M', u'干'),
+ (0x2F33, 'M', u'幺'),
+ (0x2F34, 'M', u'广'),
+ (0x2F35, 'M', u'廴'),
+ (0x2F36, 'M', u'廾'),
+ (0x2F37, 'M', u'弋'),
+ (0x2F38, 'M', u'弓'),
+ (0x2F39, 'M', u'彐'),
+ (0x2F3A, 'M', u'彡'),
+ (0x2F3B, 'M', u'彳'),
+ (0x2F3C, 'M', u'心'),
+ (0x2F3D, 'M', u'戈'),
+ (0x2F3E, 'M', u'戶'),
+ (0x2F3F, 'M', u'手'),
+ (0x2F40, 'M', u'支'),
+ (0x2F41, 'M', u'攴'),
+ (0x2F42, 'M', u'文'),
+ (0x2F43, 'M', u'斗'),
+ (0x2F44, 'M', u'斤'),
+ (0x2F45, 'M', u'方'),
+ (0x2F46, 'M', u'无'),
+ (0x2F47, 'M', u'日'),
+ (0x2F48, 'M', u'曰'),
+ (0x2F49, 'M', u'月'),
+ (0x2F4A, 'M', u'木'),
+ (0x2F4B, 'M', u'欠'),
+ (0x2F4C, 'M', u'止'),
+ (0x2F4D, 'M', u'歹'),
+ (0x2F4E, 'M', u'殳'),
+ (0x2F4F, 'M', u'毋'),
+ (0x2F50, 'M', u'比'),
+ (0x2F51, 'M', u'毛'),
+ (0x2F52, 'M', u'氏'),
+ (0x2F53, 'M', u'气'),
+ (0x2F54, 'M', u'水'),
+ (0x2F55, 'M', u'火'),
+ (0x2F56, 'M', u'爪'),
+ (0x2F57, 'M', u'父'),
+ (0x2F58, 'M', u'爻'),
+ (0x2F59, 'M', u'爿'),
+ (0x2F5A, 'M', u'片'),
+ (0x2F5B, 'M', u'牙'),
+ (0x2F5C, 'M', u'牛'),
+ (0x2F5D, 'M', u'犬'),
+ (0x2F5E, 'M', u'玄'),
+ (0x2F5F, 'M', u'玉'),
+ (0x2F60, 'M', u'瓜'),
+ (0x2F61, 'M', u'瓦'),
+ (0x2F62, 'M', u'甘'),
+ (0x2F63, 'M', u'生'),
+ (0x2F64, 'M', u'用'),
+ (0x2F65, 'M', u'田'),
+ (0x2F66, 'M', u'疋'),
+ (0x2F67, 'M', u'疒'),
+ (0x2F68, 'M', u'癶'),
+ (0x2F69, 'M', u'白'),
+ (0x2F6A, 'M', u'皮'),
+ (0x2F6B, 'M', u'皿'),
+ (0x2F6C, 'M', u'目'),
+ (0x2F6D, 'M', u'矛'),
+ (0x2F6E, 'M', u'矢'),
+ (0x2F6F, 'M', u'石'),
+ (0x2F70, 'M', u'示'),
+ (0x2F71, 'M', u'禸'),
+ (0x2F72, 'M', u'禾'),
+ (0x2F73, 'M', u'穴'),
+ (0x2F74, 'M', u'立'),
+ (0x2F75, 'M', u'竹'),
+ (0x2F76, 'M', u'米'),
+ (0x2F77, 'M', u'糸'),
+ (0x2F78, 'M', u'缶'),
+ (0x2F79, 'M', u'网'),
+ (0x2F7A, 'M', u'羊'),
+ (0x2F7B, 'M', u'羽'),
+ (0x2F7C, 'M', u'老'),
+ (0x2F7D, 'M', u'而'),
+ (0x2F7E, 'M', u'耒'),
+ (0x2F7F, 'M', u'耳'),
+ (0x2F80, 'M', u'聿'),
+ (0x2F81, 'M', u'肉'),
+ (0x2F82, 'M', u'臣'),
+ (0x2F83, 'M', u'自'),
+ (0x2F84, 'M', u'至'),
+ (0x2F85, 'M', u'臼'),
+ (0x2F86, 'M', u'舌'),
+ (0x2F87, 'M', u'舛'),
+ (0x2F88, 'M', u'舟'),
+ (0x2F89, 'M', u'艮'),
+ (0x2F8A, 'M', u'色'),
+ (0x2F8B, 'M', u'艸'),
+ (0x2F8C, 'M', u'虍'),
+ (0x2F8D, 'M', u'虫'),
+ (0x2F8E, 'M', u'血'),
+ (0x2F8F, 'M', u'行'),
+ (0x2F90, 'M', u'衣'),
+ (0x2F91, 'M', u'襾'),
]
-def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_28():
return [
- (0x2F7A, 'M', '羊'),
- (0x2F7B, 'M', '羽'),
- (0x2F7C, 'M', '老'),
- (0x2F7D, 'M', '而'),
- (0x2F7E, 'M', '耒'),
- (0x2F7F, 'M', '耳'),
- (0x2F80, 'M', '聿'),
- (0x2F81, 'M', '肉'),
- (0x2F82, 'M', '臣'),
- (0x2F83, 'M', '自'),
- (0x2F84, 'M', '至'),
- (0x2F85, 'M', '臼'),
- (0x2F86, 'M', '舌'),
- (0x2F87, 'M', '舛'),
- (0x2F88, 'M', '舟'),
- (0x2F89, 'M', '艮'),
- (0x2F8A, 'M', '色'),
- (0x2F8B, 'M', '艸'),
- (0x2F8C, 'M', '虍'),
- (0x2F8D, 'M', '虫'),
- (0x2F8E, 'M', '血'),
- (0x2F8F, 'M', '行'),
- (0x2F90, 'M', '衣'),
- (0x2F91, 'M', '襾'),
- (0x2F92, 'M', '見'),
- (0x2F93, 'M', '角'),
- (0x2F94, 'M', '言'),
- (0x2F95, 'M', '谷'),
- (0x2F96, 'M', '豆'),
- (0x2F97, 'M', '豕'),
- (0x2F98, 'M', '豸'),
- (0x2F99, 'M', '貝'),
- (0x2F9A, 'M', '赤'),
- (0x2F9B, 'M', '走'),
- (0x2F9C, 'M', '足'),
- (0x2F9D, 'M', '身'),
- (0x2F9E, 'M', '車'),
- (0x2F9F, 'M', '辛'),
- (0x2FA0, 'M', '辰'),
- (0x2FA1, 'M', '辵'),
- (0x2FA2, 'M', '邑'),
- (0x2FA3, 'M', '酉'),
- (0x2FA4, 'M', '釆'),
- (0x2FA5, 'M', '里'),
- (0x2FA6, 'M', '金'),
- (0x2FA7, 'M', '長'),
- (0x2FA8, 'M', '門'),
- (0x2FA9, 'M', '阜'),
- (0x2FAA, 'M', '隶'),
- (0x2FAB, 'M', '隹'),
- (0x2FAC, 'M', '雨'),
- (0x2FAD, 'M', '靑'),
- (0x2FAE, 'M', '非'),
- (0x2FAF, 'M', '面'),
- (0x2FB0, 'M', '革'),
- (0x2FB1, 'M', '韋'),
- (0x2FB2, 'M', '韭'),
- (0x2FB3, 'M', '音'),
- (0x2FB4, 'M', '頁'),
- (0x2FB5, 'M', '風'),
- (0x2FB6, 'M', '飛'),
- (0x2FB7, 'M', '食'),
- (0x2FB8, 'M', '首'),
- (0x2FB9, 'M', '香'),
- (0x2FBA, 'M', '馬'),
- (0x2FBB, 'M', '骨'),
- (0x2FBC, 'M', '高'),
- (0x2FBD, 'M', '髟'),
- (0x2FBE, 'M', '鬥'),
- (0x2FBF, 'M', '鬯'),
- (0x2FC0, 'M', '鬲'),
- (0x2FC1, 'M', '鬼'),
- (0x2FC2, 'M', '魚'),
- (0x2FC3, 'M', '鳥'),
- (0x2FC4, 'M', '鹵'),
- (0x2FC5, 'M', '鹿'),
- (0x2FC6, 'M', '麥'),
- (0x2FC7, 'M', '麻'),
- (0x2FC8, 'M', '黃'),
- (0x2FC9, 'M', '黍'),
- (0x2FCA, 'M', '黑'),
- (0x2FCB, 'M', '黹'),
- (0x2FCC, 'M', '黽'),
- (0x2FCD, 'M', '鼎'),
- (0x2FCE, 'M', '鼓'),
- (0x2FCF, 'M', '鼠'),
- (0x2FD0, 'M', '鼻'),
- (0x2FD1, 'M', '齊'),
- (0x2FD2, 'M', '齒'),
- (0x2FD3, 'M', '龍'),
- (0x2FD4, 'M', '龜'),
- (0x2FD5, 'M', '龠'),
+ (0x2F92, 'M', u'見'),
+ (0x2F93, 'M', u'角'),
+ (0x2F94, 'M', u'言'),
+ (0x2F95, 'M', u'谷'),
+ (0x2F96, 'M', u'豆'),
+ (0x2F97, 'M', u'豕'),
+ (0x2F98, 'M', u'豸'),
+ (0x2F99, 'M', u'貝'),
+ (0x2F9A, 'M', u'赤'),
+ (0x2F9B, 'M', u'走'),
+ (0x2F9C, 'M', u'足'),
+ (0x2F9D, 'M', u'身'),
+ (0x2F9E, 'M', u'車'),
+ (0x2F9F, 'M', u'辛'),
+ (0x2FA0, 'M', u'辰'),
+ (0x2FA1, 'M', u'辵'),
+ (0x2FA2, 'M', u'邑'),
+ (0x2FA3, 'M', u'酉'),
+ (0x2FA4, 'M', u'釆'),
+ (0x2FA5, 'M', u'里'),
+ (0x2FA6, 'M', u'金'),
+ (0x2FA7, 'M', u'長'),
+ (0x2FA8, 'M', u'門'),
+ (0x2FA9, 'M', u'阜'),
+ (0x2FAA, 'M', u'隶'),
+ (0x2FAB, 'M', u'隹'),
+ (0x2FAC, 'M', u'雨'),
+ (0x2FAD, 'M', u'靑'),
+ (0x2FAE, 'M', u'非'),
+ (0x2FAF, 'M', u'面'),
+ (0x2FB0, 'M', u'革'),
+ (0x2FB1, 'M', u'韋'),
+ (0x2FB2, 'M', u'韭'),
+ (0x2FB3, 'M', u'音'),
+ (0x2FB4, 'M', u'頁'),
+ (0x2FB5, 'M', u'風'),
+ (0x2FB6, 'M', u'飛'),
+ (0x2FB7, 'M', u'食'),
+ (0x2FB8, 'M', u'首'),
+ (0x2FB9, 'M', u'香'),
+ (0x2FBA, 'M', u'馬'),
+ (0x2FBB, 'M', u'骨'),
+ (0x2FBC, 'M', u'高'),
+ (0x2FBD, 'M', u'髟'),
+ (0x2FBE, 'M', u'鬥'),
+ (0x2FBF, 'M', u'鬯'),
+ (0x2FC0, 'M', u'鬲'),
+ (0x2FC1, 'M', u'鬼'),
+ (0x2FC2, 'M', u'魚'),
+ (0x2FC3, 'M', u'鳥'),
+ (0x2FC4, 'M', u'鹵'),
+ (0x2FC5, 'M', u'鹿'),
+ (0x2FC6, 'M', u'麥'),
+ (0x2FC7, 'M', u'麻'),
+ (0x2FC8, 'M', u'黃'),
+ (0x2FC9, 'M', u'黍'),
+ (0x2FCA, 'M', u'黑'),
+ (0x2FCB, 'M', u'黹'),
+ (0x2FCC, 'M', u'黽'),
+ (0x2FCD, 'M', u'鼎'),
+ (0x2FCE, 'M', u'鼓'),
+ (0x2FCF, 'M', u'鼠'),
+ (0x2FD0, 'M', u'鼻'),
+ (0x2FD1, 'M', u'齊'),
+ (0x2FD2, 'M', u'齒'),
+ (0x2FD3, 'M', u'龍'),
+ (0x2FD4, 'M', u'龜'),
+ (0x2FD5, 'M', u'龠'),
(0x2FD6, 'X'),
- (0x3000, '3', ' '),
+ (0x3000, '3', u' '),
(0x3001, 'V'),
- (0x3002, 'M', '.'),
+ (0x3002, 'M', u'.'),
(0x3003, 'V'),
- (0x3036, 'M', '〒'),
+ (0x3036, 'M', u'〒'),
(0x3037, 'V'),
- (0x3038, 'M', '十'),
- ]
-
-def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x3039, 'M', '卄'),
- (0x303A, 'M', '卅'),
+ (0x3038, 'M', u'十'),
+ (0x3039, 'M', u'卄'),
+ (0x303A, 'M', u'卅'),
(0x303B, 'V'),
(0x3040, 'X'),
(0x3041, 'V'),
(0x3097, 'X'),
(0x3099, 'V'),
- (0x309B, '3', ' ゙'),
- (0x309C, '3', ' ゚'),
+ (0x309B, '3', u' ゙'),
+ (0x309C, '3', u' ゚'),
(0x309D, 'V'),
- (0x309F, 'M', 'より'),
+ (0x309F, 'M', u'より'),
(0x30A0, 'V'),
- (0x30FF, 'M', 'コト'),
+ (0x30FF, 'M', u'コト'),
(0x3100, 'X'),
(0x3105, 'V'),
(0x3130, 'X'),
- (0x3131, 'M', 'ᄀ'),
- (0x3132, 'M', 'ᄁ'),
- (0x3133, 'M', 'ᆪ'),
- (0x3134, 'M', 'ᄂ'),
- (0x3135, 'M', 'ᆬ'),
- (0x3136, 'M', 'ᆭ'),
- (0x3137, 'M', 'ᄃ'),
- (0x3138, 'M', 'ᄄ'),
- (0x3139, 'M', 'ᄅ'),
- (0x313A, 'M', 'ᆰ'),
- (0x313B, 'M', 'ᆱ'),
- (0x313C, 'M', 'ᆲ'),
- (0x313D, 'M', 'ᆳ'),
- (0x313E, 'M', 'ᆴ'),
- (0x313F, 'M', 'ᆵ'),
- (0x3140, 'M', 'ᄚ'),
- (0x3141, 'M', 'ᄆ'),
- (0x3142, 'M', 'ᄇ'),
- (0x3143, 'M', 'ᄈ'),
- (0x3144, 'M', 'ᄡ'),
- (0x3145, 'M', 'ᄉ'),
- (0x3146, 'M', 'ᄊ'),
- (0x3147, 'M', 'ᄋ'),
- (0x3148, 'M', 'ᄌ'),
- (0x3149, 'M', 'ᄍ'),
- (0x314A, 'M', 'ᄎ'),
- (0x314B, 'M', 'ᄏ'),
- (0x314C, 'M', 'ᄐ'),
- (0x314D, 'M', 'ᄑ'),
- (0x314E, 'M', 'ᄒ'),
- (0x314F, 'M', 'ᅡ'),
- (0x3150, 'M', 'ᅢ'),
- (0x3151, 'M', 'ᅣ'),
- (0x3152, 'M', 'ᅤ'),
- (0x3153, 'M', 'ᅥ'),
- (0x3154, 'M', 'ᅦ'),
- (0x3155, 'M', 'ᅧ'),
- (0x3156, 'M', 'ᅨ'),
- (0x3157, 'M', 'ᅩ'),
- (0x3158, 'M', 'ᅪ'),
- (0x3159, 'M', 'ᅫ'),
- (0x315A, 'M', 'ᅬ'),
- (0x315B, 'M', 'ᅭ'),
- (0x315C, 'M', 'ᅮ'),
- (0x315D, 'M', 'ᅯ'),
- (0x315E, 'M', 'ᅰ'),
- (0x315F, 'M', 'ᅱ'),
- (0x3160, 'M', 'ᅲ'),
- (0x3161, 'M', 'ᅳ'),
- (0x3162, 'M', 'ᅴ'),
- (0x3163, 'M', 'ᅵ'),
- (0x3164, 'X'),
- (0x3165, 'M', 'ᄔ'),
- (0x3166, 'M', 'ᄕ'),
- (0x3167, 'M', 'ᇇ'),
- (0x3168, 'M', 'ᇈ'),
- (0x3169, 'M', 'ᇌ'),
- (0x316A, 'M', 'ᇎ'),
- (0x316B, 'M', 'ᇓ'),
- (0x316C, 'M', 'ᇗ'),
- (0x316D, 'M', 'ᇙ'),
- (0x316E, 'M', 'ᄜ'),
- (0x316F, 'M', 'ᇝ'),
- (0x3170, 'M', 'ᇟ'),
- (0x3171, 'M', 'ᄝ'),
- (0x3172, 'M', 'ᄞ'),
- (0x3173, 'M', 'ᄠ'),
- (0x3174, 'M', 'ᄢ'),
- (0x3175, 'M', 'ᄣ'),
- (0x3176, 'M', 'ᄧ'),
- (0x3177, 'M', 'ᄩ'),
- (0x3178, 'M', 'ᄫ'),
- (0x3179, 'M', 'ᄬ'),
- (0x317A, 'M', 'ᄭ'),
- (0x317B, 'M', 'ᄮ'),
- (0x317C, 'M', 'ᄯ'),
- (0x317D, 'M', 'ᄲ'),
- (0x317E, 'M', 'ᄶ'),
- (0x317F, 'M', 'ᅀ'),
- (0x3180, 'M', 'ᅇ'),
- (0x3181, 'M', 'ᅌ'),
- (0x3182, 'M', 'ᇱ'),
- (0x3183, 'M', 'ᇲ'),
- (0x3184, 'M', 'ᅗ'),
+ (0x3131, 'M', u'ᄀ'),
+ (0x3132, 'M', u'ᄁ'),
+ (0x3133, 'M', u'ᆪ'),
+ (0x3134, 'M', u'ᄂ'),
+ (0x3135, 'M', u'ᆬ'),
+ (0x3136, 'M', u'ᆭ'),
+ (0x3137, 'M', u'ᄃ'),
+ (0x3138, 'M', u'ᄄ'),
]
-def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_29():
return [
- (0x3185, 'M', 'ᅘ'),
- (0x3186, 'M', 'ᅙ'),
- (0x3187, 'M', 'ᆄ'),
- (0x3188, 'M', 'ᆅ'),
- (0x3189, 'M', 'ᆈ'),
- (0x318A, 'M', 'ᆑ'),
- (0x318B, 'M', 'ᆒ'),
- (0x318C, 'M', 'ᆔ'),
- (0x318D, 'M', 'ᆞ'),
- (0x318E, 'M', 'ᆡ'),
+ (0x3139, 'M', u'ᄅ'),
+ (0x313A, 'M', u'ᆰ'),
+ (0x313B, 'M', u'ᆱ'),
+ (0x313C, 'M', u'ᆲ'),
+ (0x313D, 'M', u'ᆳ'),
+ (0x313E, 'M', u'ᆴ'),
+ (0x313F, 'M', u'ᆵ'),
+ (0x3140, 'M', u'ᄚ'),
+ (0x3141, 'M', u'ᄆ'),
+ (0x3142, 'M', u'ᄇ'),
+ (0x3143, 'M', u'ᄈ'),
+ (0x3144, 'M', u'ᄡ'),
+ (0x3145, 'M', u'ᄉ'),
+ (0x3146, 'M', u'ᄊ'),
+ (0x3147, 'M', u'ᄋ'),
+ (0x3148, 'M', u'ᄌ'),
+ (0x3149, 'M', u'ᄍ'),
+ (0x314A, 'M', u'ᄎ'),
+ (0x314B, 'M', u'ᄏ'),
+ (0x314C, 'M', u'ᄐ'),
+ (0x314D, 'M', u'ᄑ'),
+ (0x314E, 'M', u'ᄒ'),
+ (0x314F, 'M', u'ᅡ'),
+ (0x3150, 'M', u'ᅢ'),
+ (0x3151, 'M', u'ᅣ'),
+ (0x3152, 'M', u'ᅤ'),
+ (0x3153, 'M', u'ᅥ'),
+ (0x3154, 'M', u'ᅦ'),
+ (0x3155, 'M', u'ᅧ'),
+ (0x3156, 'M', u'ᅨ'),
+ (0x3157, 'M', u'ᅩ'),
+ (0x3158, 'M', u'ᅪ'),
+ (0x3159, 'M', u'ᅫ'),
+ (0x315A, 'M', u'ᅬ'),
+ (0x315B, 'M', u'ᅭ'),
+ (0x315C, 'M', u'ᅮ'),
+ (0x315D, 'M', u'ᅯ'),
+ (0x315E, 'M', u'ᅰ'),
+ (0x315F, 'M', u'ᅱ'),
+ (0x3160, 'M', u'ᅲ'),
+ (0x3161, 'M', u'ᅳ'),
+ (0x3162, 'M', u'ᅴ'),
+ (0x3163, 'M', u'ᅵ'),
+ (0x3164, 'X'),
+ (0x3165, 'M', u'ᄔ'),
+ (0x3166, 'M', u'ᄕ'),
+ (0x3167, 'M', u'ᇇ'),
+ (0x3168, 'M', u'ᇈ'),
+ (0x3169, 'M', u'ᇌ'),
+ (0x316A, 'M', u'ᇎ'),
+ (0x316B, 'M', u'ᇓ'),
+ (0x316C, 'M', u'ᇗ'),
+ (0x316D, 'M', u'ᇙ'),
+ (0x316E, 'M', u'ᄜ'),
+ (0x316F, 'M', u'ᇝ'),
+ (0x3170, 'M', u'ᇟ'),
+ (0x3171, 'M', u'ᄝ'),
+ (0x3172, 'M', u'ᄞ'),
+ (0x3173, 'M', u'ᄠ'),
+ (0x3174, 'M', u'ᄢ'),
+ (0x3175, 'M', u'ᄣ'),
+ (0x3176, 'M', u'ᄧ'),
+ (0x3177, 'M', u'ᄩ'),
+ (0x3178, 'M', u'ᄫ'),
+ (0x3179, 'M', u'ᄬ'),
+ (0x317A, 'M', u'ᄭ'),
+ (0x317B, 'M', u'ᄮ'),
+ (0x317C, 'M', u'ᄯ'),
+ (0x317D, 'M', u'ᄲ'),
+ (0x317E, 'M', u'ᄶ'),
+ (0x317F, 'M', u'ᅀ'),
+ (0x3180, 'M', u'ᅇ'),
+ (0x3181, 'M', u'ᅌ'),
+ (0x3182, 'M', u'ᇱ'),
+ (0x3183, 'M', u'ᇲ'),
+ (0x3184, 'M', u'ᅗ'),
+ (0x3185, 'M', u'ᅘ'),
+ (0x3186, 'M', u'ᅙ'),
+ (0x3187, 'M', u'ᆄ'),
+ (0x3188, 'M', u'ᆅ'),
+ (0x3189, 'M', u'ᆈ'),
+ (0x318A, 'M', u'ᆑ'),
+ (0x318B, 'M', u'ᆒ'),
+ (0x318C, 'M', u'ᆔ'),
+ (0x318D, 'M', u'ᆞ'),
+ (0x318E, 'M', u'ᆡ'),
(0x318F, 'X'),
(0x3190, 'V'),
- (0x3192, 'M', '一'),
- (0x3193, 'M', '二'),
- (0x3194, 'M', '三'),
- (0x3195, 'M', '四'),
- (0x3196, 'M', '上'),
- (0x3197, 'M', '中'),
- (0x3198, 'M', '下'),
- (0x3199, 'M', '甲'),
- (0x319A, 'M', '乙'),
- (0x319B, 'M', '丙'),
- (0x319C, 'M', '丁'),
- (0x319D, 'M', '天'),
- (0x319E, 'M', '地'),
- (0x319F, 'M', '人'),
+ (0x3192, 'M', u'一'),
+ (0x3193, 'M', u'二'),
+ (0x3194, 'M', u'三'),
+ (0x3195, 'M', u'四'),
+ (0x3196, 'M', u'上'),
+ (0x3197, 'M', u'中'),
+ (0x3198, 'M', u'下'),
+ (0x3199, 'M', u'甲'),
+ (0x319A, 'M', u'乙'),
+ (0x319B, 'M', u'丙'),
+ (0x319C, 'M', u'丁'),
+ (0x319D, 'M', u'天'),
+ ]
+
+def _seg_30():
+ return [
+ (0x319E, 'M', u'地'),
+ (0x319F, 'M', u'人'),
(0x31A0, 'V'),
+ (0x31BB, 'X'),
+ (0x31C0, 'V'),
(0x31E4, 'X'),
(0x31F0, 'V'),
- (0x3200, '3', '(ᄀ)'),
- (0x3201, '3', '(ᄂ)'),
- (0x3202, '3', '(ᄃ)'),
- (0x3203, '3', '(ᄅ)'),
- (0x3204, '3', '(ᄆ)'),
- (0x3205, '3', '(ᄇ)'),
- (0x3206, '3', '(ᄉ)'),
- (0x3207, '3', '(ᄋ)'),
- (0x3208, '3', '(ᄌ)'),
- (0x3209, '3', '(ᄎ)'),
- (0x320A, '3', '(ᄏ)'),
- (0x320B, '3', '(ᄐ)'),
- (0x320C, '3', '(ᄑ)'),
- (0x320D, '3', '(ᄒ)'),
- (0x320E, '3', '(가)'),
- (0x320F, '3', '(나)'),
- (0x3210, '3', '(다)'),
- (0x3211, '3', '(라)'),
- (0x3212, '3', '(마)'),
- (0x3213, '3', '(바)'),
- (0x3214, '3', '(사)'),
- (0x3215, '3', '(아)'),
- (0x3216, '3', '(자)'),
- (0x3217, '3', '(차)'),
- (0x3218, '3', '(카)'),
- (0x3219, '3', '(타)'),
- (0x321A, '3', '(파)'),
- (0x321B, '3', '(하)'),
- (0x321C, '3', '(주)'),
- (0x321D, '3', '(오전)'),
- (0x321E, '3', '(오후)'),
+ (0x3200, '3', u'(ᄀ)'),
+ (0x3201, '3', u'(ᄂ)'),
+ (0x3202, '3', u'(ᄃ)'),
+ (0x3203, '3', u'(ᄅ)'),
+ (0x3204, '3', u'(ᄆ)'),
+ (0x3205, '3', u'(ᄇ)'),
+ (0x3206, '3', u'(ᄉ)'),
+ (0x3207, '3', u'(ᄋ)'),
+ (0x3208, '3', u'(ᄌ)'),
+ (0x3209, '3', u'(ᄎ)'),
+ (0x320A, '3', u'(ᄏ)'),
+ (0x320B, '3', u'(ᄐ)'),
+ (0x320C, '3', u'(ᄑ)'),
+ (0x320D, '3', u'(ᄒ)'),
+ (0x320E, '3', u'(가)'),
+ (0x320F, '3', u'(나)'),
+ (0x3210, '3', u'(다)'),
+ (0x3211, '3', u'(라)'),
+ (0x3212, '3', u'(마)'),
+ (0x3213, '3', u'(바)'),
+ (0x3214, '3', u'(사)'),
+ (0x3215, '3', u'(아)'),
+ (0x3216, '3', u'(자)'),
+ (0x3217, '3', u'(차)'),
+ (0x3218, '3', u'(카)'),
+ (0x3219, '3', u'(타)'),
+ (0x321A, '3', u'(파)'),
+ (0x321B, '3', u'(하)'),
+ (0x321C, '3', u'(주)'),
+ (0x321D, '3', u'(오전)'),
+ (0x321E, '3', u'(오후)'),
(0x321F, 'X'),
- (0x3220, '3', '(一)'),
- (0x3221, '3', '(二)'),
- (0x3222, '3', '(三)'),
- (0x3223, '3', '(四)'),
- (0x3224, '3', '(五)'),
- (0x3225, '3', '(六)'),
- (0x3226, '3', '(七)'),
- (0x3227, '3', '(八)'),
- (0x3228, '3', '(九)'),
- (0x3229, '3', '(十)'),
- (0x322A, '3', '(月)'),
- (0x322B, '3', '(火)'),
- (0x322C, '3', '(水)'),
- (0x322D, '3', '(木)'),
- (0x322E, '3', '(金)'),
- (0x322F, '3', '(土)'),
- (0x3230, '3', '(日)'),
- (0x3231, '3', '(株)'),
- (0x3232, '3', '(有)'),
- (0x3233, '3', '(社)'),
- (0x3234, '3', '(名)'),
- (0x3235, '3', '(特)'),
- (0x3236, '3', '(財)'),
- (0x3237, '3', '(祝)'),
- (0x3238, '3', '(労)'),
- (0x3239, '3', '(代)'),
- (0x323A, '3', '(呼)'),
- (0x323B, '3', '(学)'),
- (0x323C, '3', '(監)'),
- (0x323D, '3', '(企)'),
- (0x323E, '3', '(資)'),
- (0x323F, '3', '(協)'),
- (0x3240, '3', '(祭)'),
- (0x3241, '3', '(休)'),
- (0x3242, '3', '(自)'),
- (0x3243, '3', '(至)'),
- (0x3244, 'M', '問'),
- (0x3245, 'M', '幼'),
- (0x3246, 'M', '文'),
+ (0x3220, '3', u'(一)'),
+ (0x3221, '3', u'(二)'),
+ (0x3222, '3', u'(三)'),
+ (0x3223, '3', u'(四)'),
+ (0x3224, '3', u'(五)'),
+ (0x3225, '3', u'(六)'),
+ (0x3226, '3', u'(七)'),
+ (0x3227, '3', u'(八)'),
+ (0x3228, '3', u'(九)'),
+ (0x3229, '3', u'(十)'),
+ (0x322A, '3', u'(月)'),
+ (0x322B, '3', u'(火)'),
+ (0x322C, '3', u'(水)'),
+ (0x322D, '3', u'(木)'),
+ (0x322E, '3', u'(金)'),
+ (0x322F, '3', u'(土)'),
+ (0x3230, '3', u'(日)'),
+ (0x3231, '3', u'(株)'),
+ (0x3232, '3', u'(有)'),
+ (0x3233, '3', u'(社)'),
+ (0x3234, '3', u'(名)'),
+ (0x3235, '3', u'(特)'),
+ (0x3236, '3', u'(財)'),
+ (0x3237, '3', u'(祝)'),
+ (0x3238, '3', u'(労)'),
+ (0x3239, '3', u'(代)'),
+ (0x323A, '3', u'(呼)'),
+ (0x323B, '3', u'(学)'),
+ (0x323C, '3', u'(監)'),
+ (0x323D, '3', u'(企)'),
+ (0x323E, '3', u'(資)'),
+ (0x323F, '3', u'(協)'),
+ (0x3240, '3', u'(祭)'),
+ (0x3241, '3', u'(休)'),
+ (0x3242, '3', u'(自)'),
+ (0x3243, '3', u'(至)'),
+ (0x3244, 'M', u'問'),
+ (0x3245, 'M', u'幼'),
+ (0x3246, 'M', u'文'),
+ (0x3247, 'M', u'箏'),
+ (0x3248, 'V'),
+ (0x3250, 'M', u'pte'),
+ (0x3251, 'M', u'21'),
+ (0x3252, 'M', u'22'),
+ (0x3253, 'M', u'23'),
+ (0x3254, 'M', u'24'),
+ (0x3255, 'M', u'25'),
+ (0x3256, 'M', u'26'),
+ (0x3257, 'M', u'27'),
+ (0x3258, 'M', u'28'),
+ (0x3259, 'M', u'29'),
+ (0x325A, 'M', u'30'),
+ (0x325B, 'M', u'31'),
+ (0x325C, 'M', u'32'),
+ (0x325D, 'M', u'33'),
+ (0x325E, 'M', u'34'),
+ (0x325F, 'M', u'35'),
+ (0x3260, 'M', u'ᄀ'),
+ (0x3261, 'M', u'ᄂ'),
+ (0x3262, 'M', u'ᄃ'),
+ (0x3263, 'M', u'ᄅ'),
]
-def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_31():
return [
- (0x3247, 'M', '箏'),
- (0x3248, 'V'),
- (0x3250, 'M', 'pte'),
- (0x3251, 'M', '21'),
- (0x3252, 'M', '22'),
- (0x3253, 'M', '23'),
- (0x3254, 'M', '24'),
- (0x3255, 'M', '25'),
- (0x3256, 'M', '26'),
- (0x3257, 'M', '27'),
- (0x3258, 'M', '28'),
- (0x3259, 'M', '29'),
- (0x325A, 'M', '30'),
- (0x325B, 'M', '31'),
- (0x325C, 'M', '32'),
- (0x325D, 'M', '33'),
- (0x325E, 'M', '34'),
- (0x325F, 'M', '35'),
- (0x3260, 'M', 'ᄀ'),
- (0x3261, 'M', 'ᄂ'),
- (0x3262, 'M', 'ᄃ'),
- (0x3263, 'M', 'ᄅ'),
- (0x3264, 'M', 'ᄆ'),
- (0x3265, 'M', 'ᄇ'),
- (0x3266, 'M', 'ᄉ'),
- (0x3267, 'M', 'ᄋ'),
- (0x3268, 'M', 'ᄌ'),
- (0x3269, 'M', 'ᄎ'),
- (0x326A, 'M', 'ᄏ'),
- (0x326B, 'M', 'ᄐ'),
- (0x326C, 'M', 'ᄑ'),
- (0x326D, 'M', 'ᄒ'),
- (0x326E, 'M', '가'),
- (0x326F, 'M', '나'),
- (0x3270, 'M', '다'),
- (0x3271, 'M', '라'),
- (0x3272, 'M', '마'),
- (0x3273, 'M', '바'),
- (0x3274, 'M', '사'),
- (0x3275, 'M', '아'),
- (0x3276, 'M', '자'),
- (0x3277, 'M', '차'),
- (0x3278, 'M', '카'),
- (0x3279, 'M', '타'),
- (0x327A, 'M', '파'),
- (0x327B, 'M', '하'),
- (0x327C, 'M', '참고'),
- (0x327D, 'M', '주의'),
- (0x327E, 'M', '우'),
+ (0x3264, 'M', u'ᄆ'),
+ (0x3265, 'M', u'ᄇ'),
+ (0x3266, 'M', u'ᄉ'),
+ (0x3267, 'M', u'ᄋ'),
+ (0x3268, 'M', u'ᄌ'),
+ (0x3269, 'M', u'ᄎ'),
+ (0x326A, 'M', u'ᄏ'),
+ (0x326B, 'M', u'ᄐ'),
+ (0x326C, 'M', u'ᄑ'),
+ (0x326D, 'M', u'ᄒ'),
+ (0x326E, 'M', u'가'),
+ (0x326F, 'M', u'나'),
+ (0x3270, 'M', u'다'),
+ (0x3271, 'M', u'라'),
+ (0x3272, 'M', u'마'),
+ (0x3273, 'M', u'바'),
+ (0x3274, 'M', u'사'),
+ (0x3275, 'M', u'아'),
+ (0x3276, 'M', u'자'),
+ (0x3277, 'M', u'차'),
+ (0x3278, 'M', u'카'),
+ (0x3279, 'M', u'타'),
+ (0x327A, 'M', u'파'),
+ (0x327B, 'M', u'하'),
+ (0x327C, 'M', u'참고'),
+ (0x327D, 'M', u'주의'),
+ (0x327E, 'M', u'우'),
(0x327F, 'V'),
- (0x3280, 'M', '一'),
- (0x3281, 'M', '二'),
- (0x3282, 'M', '三'),
- (0x3283, 'M', '四'),
- (0x3284, 'M', '五'),
- (0x3285, 'M', '六'),
- (0x3286, 'M', '七'),
- (0x3287, 'M', '八'),
- (0x3288, 'M', '九'),
- (0x3289, 'M', '十'),
- (0x328A, 'M', '月'),
- (0x328B, 'M', '火'),
- (0x328C, 'M', '水'),
- (0x328D, 'M', '木'),
- (0x328E, 'M', '金'),
- (0x328F, 'M', '土'),
- (0x3290, 'M', '日'),
- (0x3291, 'M', '株'),
- (0x3292, 'M', '有'),
- (0x3293, 'M', '社'),
- (0x3294, 'M', '名'),
- (0x3295, 'M', '特'),
- (0x3296, 'M', '財'),
- (0x3297, 'M', '祝'),
- (0x3298, 'M', '労'),
- (0x3299, 'M', '秘'),
- (0x329A, 'M', '男'),
- (0x329B, 'M', '女'),
- (0x329C, 'M', '適'),
- (0x329D, 'M', '優'),
- (0x329E, 'M', '印'),
- (0x329F, 'M', '注'),
- (0x32A0, 'M', '項'),
- (0x32A1, 'M', '休'),
- (0x32A2, 'M', '写'),
- (0x32A3, 'M', '正'),
- (0x32A4, 'M', '上'),
- (0x32A5, 'M', '中'),
- (0x32A6, 'M', '下'),
- (0x32A7, 'M', '左'),
- (0x32A8, 'M', '右'),
- (0x32A9, 'M', '医'),
- (0x32AA, 'M', '宗'),
- (0x32AB, 'M', '学'),
- (0x32AC, 'M', '監'),
- (0x32AD, 'M', '企'),
- (0x32AE, 'M', '資'),
- (0x32AF, 'M', '協'),
- (0x32B0, 'M', '夜'),
- (0x32B1, 'M', '36'),
+ (0x3280, 'M', u'一'),
+ (0x3281, 'M', u'二'),
+ (0x3282, 'M', u'三'),
+ (0x3283, 'M', u'四'),
+ (0x3284, 'M', u'五'),
+ (0x3285, 'M', u'六'),
+ (0x3286, 'M', u'七'),
+ (0x3287, 'M', u'八'),
+ (0x3288, 'M', u'九'),
+ (0x3289, 'M', u'十'),
+ (0x328A, 'M', u'月'),
+ (0x328B, 'M', u'火'),
+ (0x328C, 'M', u'水'),
+ (0x328D, 'M', u'木'),
+ (0x328E, 'M', u'金'),
+ (0x328F, 'M', u'土'),
+ (0x3290, 'M', u'日'),
+ (0x3291, 'M', u'株'),
+ (0x3292, 'M', u'有'),
+ (0x3293, 'M', u'社'),
+ (0x3294, 'M', u'名'),
+ (0x3295, 'M', u'特'),
+ (0x3296, 'M', u'財'),
+ (0x3297, 'M', u'祝'),
+ (0x3298, 'M', u'労'),
+ (0x3299, 'M', u'秘'),
+ (0x329A, 'M', u'男'),
+ (0x329B, 'M', u'女'),
+ (0x329C, 'M', u'適'),
+ (0x329D, 'M', u'優'),
+ (0x329E, 'M', u'印'),
+ (0x329F, 'M', u'注'),
+ (0x32A0, 'M', u'項'),
+ (0x32A1, 'M', u'休'),
+ (0x32A2, 'M', u'写'),
+ (0x32A3, 'M', u'正'),
+ (0x32A4, 'M', u'上'),
+ (0x32A5, 'M', u'中'),
+ (0x32A6, 'M', u'下'),
+ (0x32A7, 'M', u'左'),
+ (0x32A8, 'M', u'右'),
+ (0x32A9, 'M', u'医'),
+ (0x32AA, 'M', u'宗'),
+ (0x32AB, 'M', u'学'),
+ (0x32AC, 'M', u'監'),
+ (0x32AD, 'M', u'企'),
+ (0x32AE, 'M', u'資'),
+ (0x32AF, 'M', u'協'),
+ (0x32B0, 'M', u'夜'),
+ (0x32B1, 'M', u'36'),
+ (0x32B2, 'M', u'37'),
+ (0x32B3, 'M', u'38'),
+ (0x32B4, 'M', u'39'),
+ (0x32B5, 'M', u'40'),
+ (0x32B6, 'M', u'41'),
+ (0x32B7, 'M', u'42'),
+ (0x32B8, 'M', u'43'),
+ (0x32B9, 'M', u'44'),
+ (0x32BA, 'M', u'45'),
+ (0x32BB, 'M', u'46'),
+ (0x32BC, 'M', u'47'),
+ (0x32BD, 'M', u'48'),
+ (0x32BE, 'M', u'49'),
+ (0x32BF, 'M', u'50'),
+ (0x32C0, 'M', u'1月'),
+ (0x32C1, 'M', u'2月'),
+ (0x32C2, 'M', u'3月'),
+ (0x32C3, 'M', u'4月'),
+ (0x32C4, 'M', u'5月'),
+ (0x32C5, 'M', u'6月'),
+ (0x32C6, 'M', u'7月'),
+ (0x32C7, 'M', u'8月'),
]
-def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_32():
return [
- (0x32B2, 'M', '37'),
- (0x32B3, 'M', '38'),
- (0x32B4, 'M', '39'),
- (0x32B5, 'M', '40'),
- (0x32B6, 'M', '41'),
- (0x32B7, 'M', '42'),
- (0x32B8, 'M', '43'),
- (0x32B9, 'M', '44'),
- (0x32BA, 'M', '45'),
- (0x32BB, 'M', '46'),
- (0x32BC, 'M', '47'),
- (0x32BD, 'M', '48'),
- (0x32BE, 'M', '49'),
- (0x32BF, 'M', '50'),
- (0x32C0, 'M', '1月'),
- (0x32C1, 'M', '2月'),
- (0x32C2, 'M', '3月'),
- (0x32C3, 'M', '4月'),
- (0x32C4, 'M', '5月'),
- (0x32C5, 'M', '6月'),
- (0x32C6, 'M', '7月'),
- (0x32C7, 'M', '8月'),
- (0x32C8, 'M', '9月'),
- (0x32C9, 'M', '10月'),
- (0x32CA, 'M', '11月'),
- (0x32CB, 'M', '12月'),
- (0x32CC, 'M', 'hg'),
- (0x32CD, 'M', 'erg'),
- (0x32CE, 'M', 'ev'),
- (0x32CF, 'M', 'ltd'),
- (0x32D0, 'M', 'ア'),
- (0x32D1, 'M', 'イ'),
- (0x32D2, 'M', 'ウ'),
- (0x32D3, 'M', 'エ'),
- (0x32D4, 'M', 'オ'),
- (0x32D5, 'M', 'カ'),
- (0x32D6, 'M', 'キ'),
- (0x32D7, 'M', 'ク'),
- (0x32D8, 'M', 'ケ'),
- (0x32D9, 'M', 'コ'),
- (0x32DA, 'M', 'サ'),
- (0x32DB, 'M', 'シ'),
- (0x32DC, 'M', 'ス'),
- (0x32DD, 'M', 'セ'),
- (0x32DE, 'M', 'ソ'),
- (0x32DF, 'M', 'タ'),
- (0x32E0, 'M', 'チ'),
- (0x32E1, 'M', 'ツ'),
- (0x32E2, 'M', 'テ'),
- (0x32E3, 'M', 'ト'),
- (0x32E4, 'M', 'ナ'),
- (0x32E5, 'M', 'ニ'),
- (0x32E6, 'M', 'ヌ'),
- (0x32E7, 'M', 'ネ'),
- (0x32E8, 'M', 'ノ'),
- (0x32E9, 'M', 'ハ'),
- (0x32EA, 'M', 'ヒ'),
- (0x32EB, 'M', 'フ'),
- (0x32EC, 'M', 'ヘ'),
- (0x32ED, 'M', 'ホ'),
- (0x32EE, 'M', 'マ'),
- (0x32EF, 'M', 'ミ'),
- (0x32F0, 'M', 'ム'),
- (0x32F1, 'M', 'メ'),
- (0x32F2, 'M', 'モ'),
- (0x32F3, 'M', 'ヤ'),
- (0x32F4, 'M', 'ユ'),
- (0x32F5, 'M', 'ヨ'),
- (0x32F6, 'M', 'ラ'),
- (0x32F7, 'M', 'リ'),
- (0x32F8, 'M', 'ル'),
- (0x32F9, 'M', 'レ'),
- (0x32FA, 'M', 'ロ'),
- (0x32FB, 'M', 'ワ'),
- (0x32FC, 'M', 'ヰ'),
- (0x32FD, 'M', 'ヱ'),
- (0x32FE, 'M', 'ヲ'),
- (0x32FF, 'M', '令和'),
- (0x3300, 'M', 'アパート'),
- (0x3301, 'M', 'アルファ'),
- (0x3302, 'M', 'アンペア'),
- (0x3303, 'M', 'アール'),
- (0x3304, 'M', 'イニング'),
- (0x3305, 'M', 'インチ'),
- (0x3306, 'M', 'ウォン'),
- (0x3307, 'M', 'エスクード'),
- (0x3308, 'M', 'エーカー'),
- (0x3309, 'M', 'オンス'),
- (0x330A, 'M', 'オーム'),
- (0x330B, 'M', 'カイリ'),
- (0x330C, 'M', 'カラット'),
- (0x330D, 'M', 'カロリー'),
- (0x330E, 'M', 'ガロン'),
- (0x330F, 'M', 'ガンマ'),
- (0x3310, 'M', 'ギガ'),
- (0x3311, 'M', 'ギニー'),
- (0x3312, 'M', 'キュリー'),
- (0x3313, 'M', 'ギルダー'),
- (0x3314, 'M', 'キロ'),
- (0x3315, 'M', 'キログラム'),
+ (0x32C8, 'M', u'9月'),
+ (0x32C9, 'M', u'10月'),
+ (0x32CA, 'M', u'11月'),
+ (0x32CB, 'M', u'12月'),
+ (0x32CC, 'M', u'hg'),
+ (0x32CD, 'M', u'erg'),
+ (0x32CE, 'M', u'ev'),
+ (0x32CF, 'M', u'ltd'),
+ (0x32D0, 'M', u'ア'),
+ (0x32D1, 'M', u'イ'),
+ (0x32D2, 'M', u'ウ'),
+ (0x32D3, 'M', u'エ'),
+ (0x32D4, 'M', u'オ'),
+ (0x32D5, 'M', u'カ'),
+ (0x32D6, 'M', u'キ'),
+ (0x32D7, 'M', u'ク'),
+ (0x32D8, 'M', u'ケ'),
+ (0x32D9, 'M', u'コ'),
+ (0x32DA, 'M', u'サ'),
+ (0x32DB, 'M', u'シ'),
+ (0x32DC, 'M', u'ス'),
+ (0x32DD, 'M', u'セ'),
+ (0x32DE, 'M', u'ソ'),
+ (0x32DF, 'M', u'タ'),
+ (0x32E0, 'M', u'チ'),
+ (0x32E1, 'M', u'ツ'),
+ (0x32E2, 'M', u'テ'),
+ (0x32E3, 'M', u'ト'),
+ (0x32E4, 'M', u'ナ'),
+ (0x32E5, 'M', u'ニ'),
+ (0x32E6, 'M', u'ヌ'),
+ (0x32E7, 'M', u'ネ'),
+ (0x32E8, 'M', u'ノ'),
+ (0x32E9, 'M', u'ハ'),
+ (0x32EA, 'M', u'ヒ'),
+ (0x32EB, 'M', u'フ'),
+ (0x32EC, 'M', u'ヘ'),
+ (0x32ED, 'M', u'ホ'),
+ (0x32EE, 'M', u'マ'),
+ (0x32EF, 'M', u'ミ'),
+ (0x32F0, 'M', u'ム'),
+ (0x32F1, 'M', u'メ'),
+ (0x32F2, 'M', u'モ'),
+ (0x32F3, 'M', u'ヤ'),
+ (0x32F4, 'M', u'ユ'),
+ (0x32F5, 'M', u'ヨ'),
+ (0x32F6, 'M', u'ラ'),
+ (0x32F7, 'M', u'リ'),
+ (0x32F8, 'M', u'ル'),
+ (0x32F9, 'M', u'レ'),
+ (0x32FA, 'M', u'ロ'),
+ (0x32FB, 'M', u'ワ'),
+ (0x32FC, 'M', u'ヰ'),
+ (0x32FD, 'M', u'ヱ'),
+ (0x32FE, 'M', u'ヲ'),
+ (0x32FF, 'X'),
+ (0x3300, 'M', u'アパート'),
+ (0x3301, 'M', u'アルファ'),
+ (0x3302, 'M', u'アンペア'),
+ (0x3303, 'M', u'アール'),
+ (0x3304, 'M', u'イニング'),
+ (0x3305, 'M', u'インチ'),
+ (0x3306, 'M', u'ウォン'),
+ (0x3307, 'M', u'エスクード'),
+ (0x3308, 'M', u'エーカー'),
+ (0x3309, 'M', u'オンス'),
+ (0x330A, 'M', u'オーム'),
+ (0x330B, 'M', u'カイリ'),
+ (0x330C, 'M', u'カラット'),
+ (0x330D, 'M', u'カロリー'),
+ (0x330E, 'M', u'ガロン'),
+ (0x330F, 'M', u'ガンマ'),
+ (0x3310, 'M', u'ギガ'),
+ (0x3311, 'M', u'ギニー'),
+ (0x3312, 'M', u'キュリー'),
+ (0x3313, 'M', u'ギルダー'),
+ (0x3314, 'M', u'キロ'),
+ (0x3315, 'M', u'キログラム'),
+ (0x3316, 'M', u'キロメートル'),
+ (0x3317, 'M', u'キロワット'),
+ (0x3318, 'M', u'グラム'),
+ (0x3319, 'M', u'グラムトン'),
+ (0x331A, 'M', u'クルゼイロ'),
+ (0x331B, 'M', u'クローネ'),
+ (0x331C, 'M', u'ケース'),
+ (0x331D, 'M', u'コルナ'),
+ (0x331E, 'M', u'コーポ'),
+ (0x331F, 'M', u'サイクル'),
+ (0x3320, 'M', u'サンチーム'),
+ (0x3321, 'M', u'シリング'),
+ (0x3322, 'M', u'センチ'),
+ (0x3323, 'M', u'セント'),
+ (0x3324, 'M', u'ダース'),
+ (0x3325, 'M', u'デシ'),
+ (0x3326, 'M', u'ドル'),
+ (0x3327, 'M', u'トン'),
+ (0x3328, 'M', u'ナノ'),
+ (0x3329, 'M', u'ノット'),
+ (0x332A, 'M', u'ハイツ'),
+ (0x332B, 'M', u'パーセント'),
]
-def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_33():
return [
- (0x3316, 'M', 'キロメートル'),
- (0x3317, 'M', 'キロワット'),
- (0x3318, 'M', 'グラム'),
- (0x3319, 'M', 'グラムトン'),
- (0x331A, 'M', 'クルゼイロ'),
- (0x331B, 'M', 'クローネ'),
- (0x331C, 'M', 'ケース'),
- (0x331D, 'M', 'コルナ'),
- (0x331E, 'M', 'コーポ'),
- (0x331F, 'M', 'サイクル'),
- (0x3320, 'M', 'サンチーム'),
- (0x3321, 'M', 'シリング'),
- (0x3322, 'M', 'センチ'),
- (0x3323, 'M', 'セント'),
- (0x3324, 'M', 'ダース'),
- (0x3325, 'M', 'デシ'),
- (0x3326, 'M', 'ドル'),
- (0x3327, 'M', 'トン'),
- (0x3328, 'M', 'ナノ'),
- (0x3329, 'M', 'ノット'),
- (0x332A, 'M', 'ハイツ'),
- (0x332B, 'M', 'パーセント'),
- (0x332C, 'M', 'パーツ'),
- (0x332D, 'M', 'バーレル'),
- (0x332E, 'M', 'ピアストル'),
- (0x332F, 'M', 'ピクル'),
- (0x3330, 'M', 'ピコ'),
- (0x3331, 'M', 'ビル'),
- (0x3332, 'M', 'ファラッド'),
- (0x3333, 'M', 'フィート'),
- (0x3334, 'M', 'ブッシェル'),
- (0x3335, 'M', 'フラン'),
- (0x3336, 'M', 'ヘクタール'),
- (0x3337, 'M', 'ペソ'),
- (0x3338, 'M', 'ペニヒ'),
- (0x3339, 'M', 'ヘルツ'),
- (0x333A, 'M', 'ペンス'),
- (0x333B, 'M', 'ページ'),
- (0x333C, 'M', 'ベータ'),
- (0x333D, 'M', 'ポイント'),
- (0x333E, 'M', 'ボルト'),
- (0x333F, 'M', 'ホン'),
- (0x3340, 'M', 'ポンド'),
- (0x3341, 'M', 'ホール'),
- (0x3342, 'M', 'ホーン'),
- (0x3343, 'M', 'マイクロ'),
- (0x3344, 'M', 'マイル'),
- (0x3345, 'M', 'マッハ'),
- (0x3346, 'M', 'マルク'),
- (0x3347, 'M', 'マンション'),
- (0x3348, 'M', 'ミクロン'),
- (0x3349, 'M', 'ミリ'),
- (0x334A, 'M', 'ミリバール'),
- (0x334B, 'M', 'メガ'),
- (0x334C, 'M', 'メガトン'),
- (0x334D, 'M', 'メートル'),
- (0x334E, 'M', 'ヤード'),
- (0x334F, 'M', 'ヤール'),
- (0x3350, 'M', 'ユアン'),
- (0x3351, 'M', 'リットル'),
- (0x3352, 'M', 'リラ'),
- (0x3353, 'M', 'ルピー'),
- (0x3354, 'M', 'ルーブル'),
- (0x3355, 'M', 'レム'),
- (0x3356, 'M', 'レントゲン'),
- (0x3357, 'M', 'ワット'),
- (0x3358, 'M', '0点'),
- (0x3359, 'M', '1点'),
- (0x335A, 'M', '2点'),
- (0x335B, 'M', '3点'),
- (0x335C, 'M', '4点'),
- (0x335D, 'M', '5点'),
- (0x335E, 'M', '6点'),
- (0x335F, 'M', '7点'),
- (0x3360, 'M', '8点'),
- (0x3361, 'M', '9点'),
- (0x3362, 'M', '10点'),
- (0x3363, 'M', '11点'),
- (0x3364, 'M', '12点'),
- (0x3365, 'M', '13点'),
- (0x3366, 'M', '14点'),
- (0x3367, 'M', '15点'),
- (0x3368, 'M', '16点'),
- (0x3369, 'M', '17点'),
- (0x336A, 'M', '18点'),
- (0x336B, 'M', '19点'),
- (0x336C, 'M', '20点'),
- (0x336D, 'M', '21点'),
- (0x336E, 'M', '22点'),
- (0x336F, 'M', '23点'),
- (0x3370, 'M', '24点'),
- (0x3371, 'M', 'hpa'),
- (0x3372, 'M', 'da'),
- (0x3373, 'M', 'au'),
- (0x3374, 'M', 'bar'),
- (0x3375, 'M', 'ov'),
- (0x3376, 'M', 'pc'),
- (0x3377, 'M', 'dm'),
- (0x3378, 'M', 'dm2'),
- (0x3379, 'M', 'dm3'),
+ (0x332C, 'M', u'パーツ'),
+ (0x332D, 'M', u'バーレル'),
+ (0x332E, 'M', u'ピアストル'),
+ (0x332F, 'M', u'ピクル'),
+ (0x3330, 'M', u'ピコ'),
+ (0x3331, 'M', u'ビル'),
+ (0x3332, 'M', u'ファラッド'),
+ (0x3333, 'M', u'フィート'),
+ (0x3334, 'M', u'ブッシェル'),
+ (0x3335, 'M', u'フラン'),
+ (0x3336, 'M', u'ヘクタール'),
+ (0x3337, 'M', u'ペソ'),
+ (0x3338, 'M', u'ペニヒ'),
+ (0x3339, 'M', u'ヘルツ'),
+ (0x333A, 'M', u'ペンス'),
+ (0x333B, 'M', u'ページ'),
+ (0x333C, 'M', u'ベータ'),
+ (0x333D, 'M', u'ポイント'),
+ (0x333E, 'M', u'ボルト'),
+ (0x333F, 'M', u'ホン'),
+ (0x3340, 'M', u'ポンド'),
+ (0x3341, 'M', u'ホール'),
+ (0x3342, 'M', u'ホーン'),
+ (0x3343, 'M', u'マイクロ'),
+ (0x3344, 'M', u'マイル'),
+ (0x3345, 'M', u'マッハ'),
+ (0x3346, 'M', u'マルク'),
+ (0x3347, 'M', u'マンション'),
+ (0x3348, 'M', u'ミクロン'),
+ (0x3349, 'M', u'ミリ'),
+ (0x334A, 'M', u'ミリバール'),
+ (0x334B, 'M', u'メガ'),
+ (0x334C, 'M', u'メガトン'),
+ (0x334D, 'M', u'メートル'),
+ (0x334E, 'M', u'ヤード'),
+ (0x334F, 'M', u'ヤール'),
+ (0x3350, 'M', u'ユアン'),
+ (0x3351, 'M', u'リットル'),
+ (0x3352, 'M', u'リラ'),
+ (0x3353, 'M', u'ルピー'),
+ (0x3354, 'M', u'ルーブル'),
+ (0x3355, 'M', u'レム'),
+ (0x3356, 'M', u'レントゲン'),
+ (0x3357, 'M', u'ワット'),
+ (0x3358, 'M', u'0点'),
+ (0x3359, 'M', u'1点'),
+ (0x335A, 'M', u'2点'),
+ (0x335B, 'M', u'3点'),
+ (0x335C, 'M', u'4点'),
+ (0x335D, 'M', u'5点'),
+ (0x335E, 'M', u'6点'),
+ (0x335F, 'M', u'7点'),
+ (0x3360, 'M', u'8点'),
+ (0x3361, 'M', u'9点'),
+ (0x3362, 'M', u'10点'),
+ (0x3363, 'M', u'11点'),
+ (0x3364, 'M', u'12点'),
+ (0x3365, 'M', u'13点'),
+ (0x3366, 'M', u'14点'),
+ (0x3367, 'M', u'15点'),
+ (0x3368, 'M', u'16点'),
+ (0x3369, 'M', u'17点'),
+ (0x336A, 'M', u'18点'),
+ (0x336B, 'M', u'19点'),
+ (0x336C, 'M', u'20点'),
+ (0x336D, 'M', u'21点'),
+ (0x336E, 'M', u'22点'),
+ (0x336F, 'M', u'23点'),
+ (0x3370, 'M', u'24点'),
+ (0x3371, 'M', u'hpa'),
+ (0x3372, 'M', u'da'),
+ (0x3373, 'M', u'au'),
+ (0x3374, 'M', u'bar'),
+ (0x3375, 'M', u'ov'),
+ (0x3376, 'M', u'pc'),
+ (0x3377, 'M', u'dm'),
+ (0x3378, 'M', u'dm2'),
+ (0x3379, 'M', u'dm3'),
+ (0x337A, 'M', u'iu'),
+ (0x337B, 'M', u'平成'),
+ (0x337C, 'M', u'昭和'),
+ (0x337D, 'M', u'大正'),
+ (0x337E, 'M', u'明治'),
+ (0x337F, 'M', u'株式会社'),
+ (0x3380, 'M', u'pa'),
+ (0x3381, 'M', u'na'),
+ (0x3382, 'M', u'μa'),
+ (0x3383, 'M', u'ma'),
+ (0x3384, 'M', u'ka'),
+ (0x3385, 'M', u'kb'),
+ (0x3386, 'M', u'mb'),
+ (0x3387, 'M', u'gb'),
+ (0x3388, 'M', u'cal'),
+ (0x3389, 'M', u'kcal'),
+ (0x338A, 'M', u'pf'),
+ (0x338B, 'M', u'nf'),
+ (0x338C, 'M', u'μf'),
+ (0x338D, 'M', u'μg'),
+ (0x338E, 'M', u'mg'),
+ (0x338F, 'M', u'kg'),
]
-def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_34():
return [
- (0x337A, 'M', 'iu'),
- (0x337B, 'M', '平成'),
- (0x337C, 'M', '昭和'),
- (0x337D, 'M', '大正'),
- (0x337E, 'M', '明治'),
- (0x337F, 'M', '株式会社'),
- (0x3380, 'M', 'pa'),
- (0x3381, 'M', 'na'),
- (0x3382, 'M', 'μa'),
- (0x3383, 'M', 'ma'),
- (0x3384, 'M', 'ka'),
- (0x3385, 'M', 'kb'),
- (0x3386, 'M', 'mb'),
- (0x3387, 'M', 'gb'),
- (0x3388, 'M', 'cal'),
- (0x3389, 'M', 'kcal'),
- (0x338A, 'M', 'pf'),
- (0x338B, 'M', 'nf'),
- (0x338C, 'M', 'μf'),
- (0x338D, 'M', 'μg'),
- (0x338E, 'M', 'mg'),
- (0x338F, 'M', 'kg'),
- (0x3390, 'M', 'hz'),
- (0x3391, 'M', 'khz'),
- (0x3392, 'M', 'mhz'),
- (0x3393, 'M', 'ghz'),
- (0x3394, 'M', 'thz'),
- (0x3395, 'M', 'μl'),
- (0x3396, 'M', 'ml'),
- (0x3397, 'M', 'dl'),
- (0x3398, 'M', 'kl'),
- (0x3399, 'M', 'fm'),
- (0x339A, 'M', 'nm'),
- (0x339B, 'M', 'μm'),
- (0x339C, 'M', 'mm'),
- (0x339D, 'M', 'cm'),
- (0x339E, 'M', 'km'),
- (0x339F, 'M', 'mm2'),
- (0x33A0, 'M', 'cm2'),
- (0x33A1, 'M', 'm2'),
- (0x33A2, 'M', 'km2'),
- (0x33A3, 'M', 'mm3'),
- (0x33A4, 'M', 'cm3'),
- (0x33A5, 'M', 'm3'),
- (0x33A6, 'M', 'km3'),
- (0x33A7, 'M', 'm∕s'),
- (0x33A8, 'M', 'm∕s2'),
- (0x33A9, 'M', 'pa'),
- (0x33AA, 'M', 'kpa'),
- (0x33AB, 'M', 'mpa'),
- (0x33AC, 'M', 'gpa'),
- (0x33AD, 'M', 'rad'),
- (0x33AE, 'M', 'rad∕s'),
- (0x33AF, 'M', 'rad∕s2'),
- (0x33B0, 'M', 'ps'),
- (0x33B1, 'M', 'ns'),
- (0x33B2, 'M', 'μs'),
- (0x33B3, 'M', 'ms'),
- (0x33B4, 'M', 'pv'),
- (0x33B5, 'M', 'nv'),
- (0x33B6, 'M', 'μv'),
- (0x33B7, 'M', 'mv'),
- (0x33B8, 'M', 'kv'),
- (0x33B9, 'M', 'mv'),
- (0x33BA, 'M', 'pw'),
- (0x33BB, 'M', 'nw'),
- (0x33BC, 'M', 'μw'),
- (0x33BD, 'M', 'mw'),
- (0x33BE, 'M', 'kw'),
- (0x33BF, 'M', 'mw'),
- (0x33C0, 'M', 'kω'),
- (0x33C1, 'M', 'mω'),
+ (0x3390, 'M', u'hz'),
+ (0x3391, 'M', u'khz'),
+ (0x3392, 'M', u'mhz'),
+ (0x3393, 'M', u'ghz'),
+ (0x3394, 'M', u'thz'),
+ (0x3395, 'M', u'μl'),
+ (0x3396, 'M', u'ml'),
+ (0x3397, 'M', u'dl'),
+ (0x3398, 'M', u'kl'),
+ (0x3399, 'M', u'fm'),
+ (0x339A, 'M', u'nm'),
+ (0x339B, 'M', u'μm'),
+ (0x339C, 'M', u'mm'),
+ (0x339D, 'M', u'cm'),
+ (0x339E, 'M', u'km'),
+ (0x339F, 'M', u'mm2'),
+ (0x33A0, 'M', u'cm2'),
+ (0x33A1, 'M', u'm2'),
+ (0x33A2, 'M', u'km2'),
+ (0x33A3, 'M', u'mm3'),
+ (0x33A4, 'M', u'cm3'),
+ (0x33A5, 'M', u'm3'),
+ (0x33A6, 'M', u'km3'),
+ (0x33A7, 'M', u'm∕s'),
+ (0x33A8, 'M', u'm∕s2'),
+ (0x33A9, 'M', u'pa'),
+ (0x33AA, 'M', u'kpa'),
+ (0x33AB, 'M', u'mpa'),
+ (0x33AC, 'M', u'gpa'),
+ (0x33AD, 'M', u'rad'),
+ (0x33AE, 'M', u'rad∕s'),
+ (0x33AF, 'M', u'rad∕s2'),
+ (0x33B0, 'M', u'ps'),
+ (0x33B1, 'M', u'ns'),
+ (0x33B2, 'M', u'μs'),
+ (0x33B3, 'M', u'ms'),
+ (0x33B4, 'M', u'pv'),
+ (0x33B5, 'M', u'nv'),
+ (0x33B6, 'M', u'μv'),
+ (0x33B7, 'M', u'mv'),
+ (0x33B8, 'M', u'kv'),
+ (0x33B9, 'M', u'mv'),
+ (0x33BA, 'M', u'pw'),
+ (0x33BB, 'M', u'nw'),
+ (0x33BC, 'M', u'μw'),
+ (0x33BD, 'M', u'mw'),
+ (0x33BE, 'M', u'kw'),
+ (0x33BF, 'M', u'mw'),
+ (0x33C0, 'M', u'kω'),
+ (0x33C1, 'M', u'mω'),
(0x33C2, 'X'),
- (0x33C3, 'M', 'bq'),
- (0x33C4, 'M', 'cc'),
- (0x33C5, 'M', 'cd'),
- (0x33C6, 'M', 'c∕kg'),
+ (0x33C3, 'M', u'bq'),
+ (0x33C4, 'M', u'cc'),
+ (0x33C5, 'M', u'cd'),
+ (0x33C6, 'M', u'c∕kg'),
(0x33C7, 'X'),
- (0x33C8, 'M', 'db'),
- (0x33C9, 'M', 'gy'),
- (0x33CA, 'M', 'ha'),
- (0x33CB, 'M', 'hp'),
- (0x33CC, 'M', 'in'),
- (0x33CD, 'M', 'kk'),
- (0x33CE, 'M', 'km'),
- (0x33CF, 'M', 'kt'),
- (0x33D0, 'M', 'lm'),
- (0x33D1, 'M', 'ln'),
- (0x33D2, 'M', 'log'),
- (0x33D3, 'M', 'lx'),
- (0x33D4, 'M', 'mb'),
- (0x33D5, 'M', 'mil'),
- (0x33D6, 'M', 'mol'),
- (0x33D7, 'M', 'ph'),
+ (0x33C8, 'M', u'db'),
+ (0x33C9, 'M', u'gy'),
+ (0x33CA, 'M', u'ha'),
+ (0x33CB, 'M', u'hp'),
+ (0x33CC, 'M', u'in'),
+ (0x33CD, 'M', u'kk'),
+ (0x33CE, 'M', u'km'),
+ (0x33CF, 'M', u'kt'),
+ (0x33D0, 'M', u'lm'),
+ (0x33D1, 'M', u'ln'),
+ (0x33D2, 'M', u'log'),
+ (0x33D3, 'M', u'lx'),
+ (0x33D4, 'M', u'mb'),
+ (0x33D5, 'M', u'mil'),
+ (0x33D6, 'M', u'mol'),
+ (0x33D7, 'M', u'ph'),
(0x33D8, 'X'),
- (0x33D9, 'M', 'ppm'),
- (0x33DA, 'M', 'pr'),
- (0x33DB, 'M', 'sr'),
- (0x33DC, 'M', 'sv'),
- (0x33DD, 'M', 'wb'),
+ (0x33D9, 'M', u'ppm'),
+ (0x33DA, 'M', u'pr'),
+ (0x33DB, 'M', u'sr'),
+ (0x33DC, 'M', u'sv'),
+ (0x33DD, 'M', u'wb'),
+ (0x33DE, 'M', u'v∕m'),
+ (0x33DF, 'M', u'a∕m'),
+ (0x33E0, 'M', u'1日'),
+ (0x33E1, 'M', u'2日'),
+ (0x33E2, 'M', u'3日'),
+ (0x33E3, 'M', u'4日'),
+ (0x33E4, 'M', u'5日'),
+ (0x33E5, 'M', u'6日'),
+ (0x33E6, 'M', u'7日'),
+ (0x33E7, 'M', u'8日'),
+ (0x33E8, 'M', u'9日'),
+ (0x33E9, 'M', u'10日'),
+ (0x33EA, 'M', u'11日'),
+ (0x33EB, 'M', u'12日'),
+ (0x33EC, 'M', u'13日'),
+ (0x33ED, 'M', u'14日'),
+ (0x33EE, 'M', u'15日'),
+ (0x33EF, 'M', u'16日'),
+ (0x33F0, 'M', u'17日'),
+ (0x33F1, 'M', u'18日'),
+ (0x33F2, 'M', u'19日'),
+ (0x33F3, 'M', u'20日'),
]
-def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_35():
return [
- (0x33DE, 'M', 'v∕m'),
- (0x33DF, 'M', 'a∕m'),
- (0x33E0, 'M', '1日'),
- (0x33E1, 'M', '2日'),
- (0x33E2, 'M', '3日'),
- (0x33E3, 'M', '4日'),
- (0x33E4, 'M', '5日'),
- (0x33E5, 'M', '6日'),
- (0x33E6, 'M', '7日'),
- (0x33E7, 'M', '8日'),
- (0x33E8, 'M', '9日'),
- (0x33E9, 'M', '10日'),
- (0x33EA, 'M', '11日'),
- (0x33EB, 'M', '12日'),
- (0x33EC, 'M', '13日'),
- (0x33ED, 'M', '14日'),
- (0x33EE, 'M', '15日'),
- (0x33EF, 'M', '16日'),
- (0x33F0, 'M', '17日'),
- (0x33F1, 'M', '18日'),
- (0x33F2, 'M', '19日'),
- (0x33F3, 'M', '20日'),
- (0x33F4, 'M', '21日'),
- (0x33F5, 'M', '22日'),
- (0x33F6, 'M', '23日'),
- (0x33F7, 'M', '24日'),
- (0x33F8, 'M', '25日'),
- (0x33F9, 'M', '26日'),
- (0x33FA, 'M', '27日'),
- (0x33FB, 'M', '28日'),
- (0x33FC, 'M', '29日'),
- (0x33FD, 'M', '30日'),
- (0x33FE, 'M', '31日'),
- (0x33FF, 'M', 'gal'),
+ (0x33F4, 'M', u'21日'),
+ (0x33F5, 'M', u'22日'),
+ (0x33F6, 'M', u'23日'),
+ (0x33F7, 'M', u'24日'),
+ (0x33F8, 'M', u'25日'),
+ (0x33F9, 'M', u'26日'),
+ (0x33FA, 'M', u'27日'),
+ (0x33FB, 'M', u'28日'),
+ (0x33FC, 'M', u'29日'),
+ (0x33FD, 'M', u'30日'),
+ (0x33FE, 'M', u'31日'),
+ (0x33FF, 'M', u'gal'),
(0x3400, 'V'),
+ (0x4DB6, 'X'),
+ (0x4DC0, 'V'),
+ (0x9FF0, 'X'),
+ (0xA000, 'V'),
(0xA48D, 'X'),
(0xA490, 'V'),
(0xA4C7, 'X'),
(0xA4D0, 'V'),
(0xA62C, 'X'),
- (0xA640, 'M', 'ꙁ'),
+ (0xA640, 'M', u'ꙁ'),
(0xA641, 'V'),
- (0xA642, 'M', 'ꙃ'),
+ (0xA642, 'M', u'ꙃ'),
(0xA643, 'V'),
- (0xA644, 'M', 'ꙅ'),
+ (0xA644, 'M', u'ꙅ'),
(0xA645, 'V'),
- (0xA646, 'M', 'ꙇ'),
+ (0xA646, 'M', u'ꙇ'),
(0xA647, 'V'),
- (0xA648, 'M', 'ꙉ'),
+ (0xA648, 'M', u'ꙉ'),
(0xA649, 'V'),
- (0xA64A, 'M', 'ꙋ'),
+ (0xA64A, 'M', u'ꙋ'),
(0xA64B, 'V'),
- (0xA64C, 'M', 'ꙍ'),
+ (0xA64C, 'M', u'ꙍ'),
(0xA64D, 'V'),
- (0xA64E, 'M', 'ꙏ'),
+ (0xA64E, 'M', u'ꙏ'),
(0xA64F, 'V'),
- (0xA650, 'M', 'ꙑ'),
+ (0xA650, 'M', u'ꙑ'),
(0xA651, 'V'),
- (0xA652, 'M', 'ꙓ'),
+ (0xA652, 'M', u'ꙓ'),
(0xA653, 'V'),
- (0xA654, 'M', 'ꙕ'),
+ (0xA654, 'M', u'ꙕ'),
(0xA655, 'V'),
- (0xA656, 'M', 'ꙗ'),
+ (0xA656, 'M', u'ꙗ'),
(0xA657, 'V'),
- (0xA658, 'M', 'ꙙ'),
+ (0xA658, 'M', u'ꙙ'),
(0xA659, 'V'),
- (0xA65A, 'M', 'ꙛ'),
+ (0xA65A, 'M', u'ꙛ'),
(0xA65B, 'V'),
- (0xA65C, 'M', 'ꙝ'),
+ (0xA65C, 'M', u'ꙝ'),
(0xA65D, 'V'),
- (0xA65E, 'M', 'ꙟ'),
+ (0xA65E, 'M', u'ꙟ'),
(0xA65F, 'V'),
- (0xA660, 'M', 'ꙡ'),
+ (0xA660, 'M', u'ꙡ'),
(0xA661, 'V'),
- (0xA662, 'M', 'ꙣ'),
+ (0xA662, 'M', u'ꙣ'),
(0xA663, 'V'),
- (0xA664, 'M', 'ꙥ'),
+ (0xA664, 'M', u'ꙥ'),
(0xA665, 'V'),
- (0xA666, 'M', 'ꙧ'),
+ (0xA666, 'M', u'ꙧ'),
(0xA667, 'V'),
- (0xA668, 'M', 'ꙩ'),
+ (0xA668, 'M', u'ꙩ'),
(0xA669, 'V'),
- (0xA66A, 'M', 'ꙫ'),
+ (0xA66A, 'M', u'ꙫ'),
(0xA66B, 'V'),
- (0xA66C, 'M', 'ꙭ'),
+ (0xA66C, 'M', u'ꙭ'),
(0xA66D, 'V'),
- (0xA680, 'M', 'ꚁ'),
+ (0xA680, 'M', u'ꚁ'),
(0xA681, 'V'),
- (0xA682, 'M', 'ꚃ'),
+ (0xA682, 'M', u'ꚃ'),
(0xA683, 'V'),
- (0xA684, 'M', 'ꚅ'),
+ (0xA684, 'M', u'ꚅ'),
(0xA685, 'V'),
- (0xA686, 'M', 'ꚇ'),
+ (0xA686, 'M', u'ꚇ'),
(0xA687, 'V'),
- (0xA688, 'M', 'ꚉ'),
+ (0xA688, 'M', u'ꚉ'),
(0xA689, 'V'),
- (0xA68A, 'M', 'ꚋ'),
+ (0xA68A, 'M', u'ꚋ'),
(0xA68B, 'V'),
- (0xA68C, 'M', 'ꚍ'),
+ (0xA68C, 'M', u'ꚍ'),
(0xA68D, 'V'),
- ]
-
-def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0xA68E, 'M', 'ꚏ'),
+ (0xA68E, 'M', u'ꚏ'),
(0xA68F, 'V'),
- (0xA690, 'M', 'ꚑ'),
+ (0xA690, 'M', u'ꚑ'),
(0xA691, 'V'),
- (0xA692, 'M', 'ꚓ'),
+ (0xA692, 'M', u'ꚓ'),
(0xA693, 'V'),
- (0xA694, 'M', 'ꚕ'),
+ (0xA694, 'M', u'ꚕ'),
(0xA695, 'V'),
- (0xA696, 'M', 'ꚗ'),
+ (0xA696, 'M', u'ꚗ'),
(0xA697, 'V'),
- (0xA698, 'M', 'ꚙ'),
+ (0xA698, 'M', u'ꚙ'),
(0xA699, 'V'),
- (0xA69A, 'M', 'ꚛ'),
+ (0xA69A, 'M', u'ꚛ'),
(0xA69B, 'V'),
- (0xA69C, 'M', 'ъ'),
- (0xA69D, 'M', 'ь'),
+ (0xA69C, 'M', u'ъ'),
+ (0xA69D, 'M', u'ь'),
(0xA69E, 'V'),
(0xA6F8, 'X'),
+ ]
+
+def _seg_36():
+ return [
(0xA700, 'V'),
- (0xA722, 'M', 'ꜣ'),
+ (0xA722, 'M', u'ꜣ'),
(0xA723, 'V'),
- (0xA724, 'M', 'ꜥ'),
+ (0xA724, 'M', u'ꜥ'),
(0xA725, 'V'),
- (0xA726, 'M', 'ꜧ'),
+ (0xA726, 'M', u'ꜧ'),
(0xA727, 'V'),
- (0xA728, 'M', 'ꜩ'),
+ (0xA728, 'M', u'ꜩ'),
(0xA729, 'V'),
- (0xA72A, 'M', 'ꜫ'),
+ (0xA72A, 'M', u'ꜫ'),
(0xA72B, 'V'),
- (0xA72C, 'M', 'ꜭ'),
+ (0xA72C, 'M', u'ꜭ'),
(0xA72D, 'V'),
- (0xA72E, 'M', 'ꜯ'),
+ (0xA72E, 'M', u'ꜯ'),
(0xA72F, 'V'),
- (0xA732, 'M', 'ꜳ'),
+ (0xA732, 'M', u'ꜳ'),
(0xA733, 'V'),
- (0xA734, 'M', 'ꜵ'),
+ (0xA734, 'M', u'ꜵ'),
(0xA735, 'V'),
- (0xA736, 'M', 'ꜷ'),
+ (0xA736, 'M', u'ꜷ'),
(0xA737, 'V'),
- (0xA738, 'M', 'ꜹ'),
+ (0xA738, 'M', u'ꜹ'),
(0xA739, 'V'),
- (0xA73A, 'M', 'ꜻ'),
+ (0xA73A, 'M', u'ꜻ'),
(0xA73B, 'V'),
- (0xA73C, 'M', 'ꜽ'),
+ (0xA73C, 'M', u'ꜽ'),
(0xA73D, 'V'),
- (0xA73E, 'M', 'ꜿ'),
+ (0xA73E, 'M', u'ꜿ'),
(0xA73F, 'V'),
- (0xA740, 'M', 'ꝁ'),
+ (0xA740, 'M', u'ꝁ'),
(0xA741, 'V'),
- (0xA742, 'M', 'ꝃ'),
+ (0xA742, 'M', u'ꝃ'),
(0xA743, 'V'),
- (0xA744, 'M', 'ꝅ'),
+ (0xA744, 'M', u'ꝅ'),
(0xA745, 'V'),
- (0xA746, 'M', 'ꝇ'),
+ (0xA746, 'M', u'ꝇ'),
(0xA747, 'V'),
- (0xA748, 'M', 'ꝉ'),
+ (0xA748, 'M', u'ꝉ'),
(0xA749, 'V'),
- (0xA74A, 'M', 'ꝋ'),
+ (0xA74A, 'M', u'ꝋ'),
(0xA74B, 'V'),
- (0xA74C, 'M', 'ꝍ'),
+ (0xA74C, 'M', u'ꝍ'),
(0xA74D, 'V'),
- (0xA74E, 'M', 'ꝏ'),
+ (0xA74E, 'M', u'ꝏ'),
(0xA74F, 'V'),
- (0xA750, 'M', 'ꝑ'),
+ (0xA750, 'M', u'ꝑ'),
(0xA751, 'V'),
- (0xA752, 'M', 'ꝓ'),
+ (0xA752, 'M', u'ꝓ'),
(0xA753, 'V'),
- (0xA754, 'M', 'ꝕ'),
+ (0xA754, 'M', u'ꝕ'),
(0xA755, 'V'),
- (0xA756, 'M', 'ꝗ'),
+ (0xA756, 'M', u'ꝗ'),
(0xA757, 'V'),
- (0xA758, 'M', 'ꝙ'),
+ (0xA758, 'M', u'ꝙ'),
(0xA759, 'V'),
- (0xA75A, 'M', 'ꝛ'),
+ (0xA75A, 'M', u'ꝛ'),
(0xA75B, 'V'),
- (0xA75C, 'M', 'ꝝ'),
+ (0xA75C, 'M', u'ꝝ'),
(0xA75D, 'V'),
- (0xA75E, 'M', 'ꝟ'),
+ (0xA75E, 'M', u'ꝟ'),
(0xA75F, 'V'),
- (0xA760, 'M', 'ꝡ'),
+ (0xA760, 'M', u'ꝡ'),
(0xA761, 'V'),
- (0xA762, 'M', 'ꝣ'),
+ (0xA762, 'M', u'ꝣ'),
(0xA763, 'V'),
- (0xA764, 'M', 'ꝥ'),
+ (0xA764, 'M', u'ꝥ'),
(0xA765, 'V'),
- (0xA766, 'M', 'ꝧ'),
+ (0xA766, 'M', u'ꝧ'),
(0xA767, 'V'),
- (0xA768, 'M', 'ꝩ'),
+ (0xA768, 'M', u'ꝩ'),
(0xA769, 'V'),
- (0xA76A, 'M', 'ꝫ'),
+ (0xA76A, 'M', u'ꝫ'),
(0xA76B, 'V'),
- (0xA76C, 'M', 'ꝭ'),
+ (0xA76C, 'M', u'ꝭ'),
(0xA76D, 'V'),
- (0xA76E, 'M', 'ꝯ'),
+ (0xA76E, 'M', u'ꝯ'),
(0xA76F, 'V'),
- (0xA770, 'M', 'ꝯ'),
+ (0xA770, 'M', u'ꝯ'),
(0xA771, 'V'),
- (0xA779, 'M', 'ꝺ'),
+ (0xA779, 'M', u'ꝺ'),
(0xA77A, 'V'),
- (0xA77B, 'M', 'ꝼ'),
- ]
-
-def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
+ (0xA77B, 'M', u'ꝼ'),
(0xA77C, 'V'),
- (0xA77D, 'M', 'ᵹ'),
- (0xA77E, 'M', 'ꝿ'),
+ (0xA77D, 'M', u'ᵹ'),
+ (0xA77E, 'M', u'ꝿ'),
(0xA77F, 'V'),
- (0xA780, 'M', 'ꞁ'),
+ (0xA780, 'M', u'ꞁ'),
(0xA781, 'V'),
- (0xA782, 'M', 'ꞃ'),
+ (0xA782, 'M', u'ꞃ'),
(0xA783, 'V'),
- (0xA784, 'M', 'ꞅ'),
+ (0xA784, 'M', u'ꞅ'),
(0xA785, 'V'),
- (0xA786, 'M', 'ꞇ'),
+ (0xA786, 'M', u'ꞇ'),
(0xA787, 'V'),
- (0xA78B, 'M', 'ꞌ'),
+ (0xA78B, 'M', u'ꞌ'),
(0xA78C, 'V'),
- (0xA78D, 'M', 'ɥ'),
+ (0xA78D, 'M', u'ɥ'),
(0xA78E, 'V'),
- (0xA790, 'M', 'ꞑ'),
+ (0xA790, 'M', u'ꞑ'),
(0xA791, 'V'),
- (0xA792, 'M', 'ꞓ'),
+ ]
+
+def _seg_37():
+ return [
+ (0xA792, 'M', u'ꞓ'),
(0xA793, 'V'),
- (0xA796, 'M', 'ꞗ'),
+ (0xA796, 'M', u'ꞗ'),
(0xA797, 'V'),
- (0xA798, 'M', 'ꞙ'),
+ (0xA798, 'M', u'ꞙ'),
(0xA799, 'V'),
- (0xA79A, 'M', 'ꞛ'),
+ (0xA79A, 'M', u'ꞛ'),
(0xA79B, 'V'),
- (0xA79C, 'M', 'ꞝ'),
+ (0xA79C, 'M', u'ꞝ'),
(0xA79D, 'V'),
- (0xA79E, 'M', 'ꞟ'),
+ (0xA79E, 'M', u'ꞟ'),
(0xA79F, 'V'),
- (0xA7A0, 'M', 'ꞡ'),
+ (0xA7A0, 'M', u'ꞡ'),
(0xA7A1, 'V'),
- (0xA7A2, 'M', 'ꞣ'),
+ (0xA7A2, 'M', u'ꞣ'),
(0xA7A3, 'V'),
- (0xA7A4, 'M', 'ꞥ'),
+ (0xA7A4, 'M', u'ꞥ'),
(0xA7A5, 'V'),
- (0xA7A6, 'M', 'ꞧ'),
+ (0xA7A6, 'M', u'ꞧ'),
(0xA7A7, 'V'),
- (0xA7A8, 'M', 'ꞩ'),
+ (0xA7A8, 'M', u'ꞩ'),
(0xA7A9, 'V'),
- (0xA7AA, 'M', 'ɦ'),
- (0xA7AB, 'M', 'ɜ'),
- (0xA7AC, 'M', 'ɡ'),
- (0xA7AD, 'M', 'ɬ'),
- (0xA7AE, 'M', 'ɪ'),
+ (0xA7AA, 'M', u'ɦ'),
+ (0xA7AB, 'M', u'ɜ'),
+ (0xA7AC, 'M', u'ɡ'),
+ (0xA7AD, 'M', u'ɬ'),
+ (0xA7AE, 'M', u'ɪ'),
(0xA7AF, 'V'),
- (0xA7B0, 'M', 'ʞ'),
- (0xA7B1, 'M', 'ʇ'),
- (0xA7B2, 'M', 'ʝ'),
- (0xA7B3, 'M', 'ꭓ'),
- (0xA7B4, 'M', 'ꞵ'),
+ (0xA7B0, 'M', u'ʞ'),
+ (0xA7B1, 'M', u'ʇ'),
+ (0xA7B2, 'M', u'ʝ'),
+ (0xA7B3, 'M', u'ꭓ'),
+ (0xA7B4, 'M', u'ꞵ'),
(0xA7B5, 'V'),
- (0xA7B6, 'M', 'ꞷ'),
+ (0xA7B6, 'M', u'ꞷ'),
(0xA7B7, 'V'),
- (0xA7B8, 'M', 'ꞹ'),
+ (0xA7B8, 'X'),
(0xA7B9, 'V'),
- (0xA7BA, 'M', 'ꞻ'),
- (0xA7BB, 'V'),
- (0xA7BC, 'M', 'ꞽ'),
- (0xA7BD, 'V'),
- (0xA7BE, 'M', 'ꞿ'),
- (0xA7BF, 'V'),
- (0xA7C0, 'M', 'ꟁ'),
- (0xA7C1, 'V'),
- (0xA7C2, 'M', 'ꟃ'),
- (0xA7C3, 'V'),
- (0xA7C4, 'M', 'ꞔ'),
- (0xA7C5, 'M', 'ʂ'),
- (0xA7C6, 'M', 'ᶎ'),
- (0xA7C7, 'M', 'ꟈ'),
- (0xA7C8, 'V'),
- (0xA7C9, 'M', 'ꟊ'),
- (0xA7CA, 'V'),
- (0xA7CB, 'X'),
- (0xA7D0, 'M', 'ꟑ'),
- (0xA7D1, 'V'),
- (0xA7D2, 'X'),
- (0xA7D3, 'V'),
- (0xA7D4, 'X'),
- (0xA7D5, 'V'),
- (0xA7D6, 'M', 'ꟗ'),
- (0xA7D7, 'V'),
- (0xA7D8, 'M', 'ꟙ'),
- (0xA7D9, 'V'),
- (0xA7DA, 'X'),
- (0xA7F2, 'M', 'c'),
- (0xA7F3, 'M', 'f'),
- (0xA7F4, 'M', 'q'),
- (0xA7F5, 'M', 'ꟶ'),
- (0xA7F6, 'V'),
- (0xA7F8, 'M', 'ħ'),
- (0xA7F9, 'M', 'œ'),
+ (0xA7BA, 'X'),
+ (0xA7F7, 'V'),
+ (0xA7F8, 'M', u'ħ'),
+ (0xA7F9, 'M', u'œ'),
(0xA7FA, 'V'),
- (0xA82D, 'X'),
+ (0xA82C, 'X'),
(0xA830, 'V'),
(0xA83A, 'X'),
(0xA840, 'V'),
(0xA878, 'X'),
(0xA880, 'V'),
(0xA8C6, 'X'),
- ]
-
-def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
(0xA8CE, 'V'),
(0xA8DA, 'X'),
(0xA8E0, 'V'),
@@ -3995,98 +3938,96 @@ def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xAB28, 'V'),
(0xAB2F, 'X'),
(0xAB30, 'V'),
- (0xAB5C, 'M', 'ꜧ'),
- (0xAB5D, 'M', 'ꬷ'),
- (0xAB5E, 'M', 'ɫ'),
- (0xAB5F, 'M', 'ꭒ'),
+ (0xAB5C, 'M', u'ꜧ'),
+ (0xAB5D, 'M', u'ꬷ'),
+ (0xAB5E, 'M', u'ɫ'),
+ (0xAB5F, 'M', u'ꭒ'),
(0xAB60, 'V'),
- (0xAB69, 'M', 'ʍ'),
- (0xAB6A, 'V'),
- (0xAB6C, 'X'),
- (0xAB70, 'M', 'Ꭰ'),
- (0xAB71, 'M', 'Ꭱ'),
- (0xAB72, 'M', 'Ꭲ'),
- (0xAB73, 'M', 'Ꭳ'),
- (0xAB74, 'M', 'Ꭴ'),
- (0xAB75, 'M', 'Ꭵ'),
- (0xAB76, 'M', 'Ꭶ'),
- (0xAB77, 'M', 'Ꭷ'),
- (0xAB78, 'M', 'Ꭸ'),
- (0xAB79, 'M', 'Ꭹ'),
- (0xAB7A, 'M', 'Ꭺ'),
- (0xAB7B, 'M', 'Ꭻ'),
- (0xAB7C, 'M', 'Ꭼ'),
- (0xAB7D, 'M', 'Ꭽ'),
- (0xAB7E, 'M', 'Ꭾ'),
- (0xAB7F, 'M', 'Ꭿ'),
- (0xAB80, 'M', 'Ꮀ'),
- (0xAB81, 'M', 'Ꮁ'),
- (0xAB82, 'M', 'Ꮂ'),
- (0xAB83, 'M', 'Ꮃ'),
- (0xAB84, 'M', 'Ꮄ'),
- (0xAB85, 'M', 'Ꮅ'),
- (0xAB86, 'M', 'Ꮆ'),
- (0xAB87, 'M', 'Ꮇ'),
- (0xAB88, 'M', 'Ꮈ'),
- (0xAB89, 'M', 'Ꮉ'),
- (0xAB8A, 'M', 'Ꮊ'),
- (0xAB8B, 'M', 'Ꮋ'),
- (0xAB8C, 'M', 'Ꮌ'),
- (0xAB8D, 'M', 'Ꮍ'),
- (0xAB8E, 'M', 'Ꮎ'),
- (0xAB8F, 'M', 'Ꮏ'),
- (0xAB90, 'M', 'Ꮐ'),
- (0xAB91, 'M', 'Ꮑ'),
- (0xAB92, 'M', 'Ꮒ'),
- (0xAB93, 'M', 'Ꮓ'),
- (0xAB94, 'M', 'Ꮔ'),
- (0xAB95, 'M', 'Ꮕ'),
- (0xAB96, 'M', 'Ꮖ'),
- (0xAB97, 'M', 'Ꮗ'),
- (0xAB98, 'M', 'Ꮘ'),
- (0xAB99, 'M', 'Ꮙ'),
- (0xAB9A, 'M', 'Ꮚ'),
- (0xAB9B, 'M', 'Ꮛ'),
- (0xAB9C, 'M', 'Ꮜ'),
- (0xAB9D, 'M', 'Ꮝ'),
- (0xAB9E, 'M', 'Ꮞ'),
- (0xAB9F, 'M', 'Ꮟ'),
- (0xABA0, 'M', 'Ꮠ'),
- (0xABA1, 'M', 'Ꮡ'),
- (0xABA2, 'M', 'Ꮢ'),
- (0xABA3, 'M', 'Ꮣ'),
- (0xABA4, 'M', 'Ꮤ'),
- (0xABA5, 'M', 'Ꮥ'),
- (0xABA6, 'M', 'Ꮦ'),
- (0xABA7, 'M', 'Ꮧ'),
- (0xABA8, 'M', 'Ꮨ'),
- (0xABA9, 'M', 'Ꮩ'),
- (0xABAA, 'M', 'Ꮪ'),
+ (0xAB66, 'X'),
+ (0xAB70, 'M', u'Ꭰ'),
+ (0xAB71, 'M', u'Ꭱ'),
+ (0xAB72, 'M', u'Ꭲ'),
+ (0xAB73, 'M', u'Ꭳ'),
+ (0xAB74, 'M', u'Ꭴ'),
+ (0xAB75, 'M', u'Ꭵ'),
+ (0xAB76, 'M', u'Ꭶ'),
+ (0xAB77, 'M', u'Ꭷ'),
+ (0xAB78, 'M', u'Ꭸ'),
+ (0xAB79, 'M', u'Ꭹ'),
+ (0xAB7A, 'M', u'Ꭺ'),
]
-def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_38():
return [
- (0xABAB, 'M', 'Ꮫ'),
- (0xABAC, 'M', 'Ꮬ'),
- (0xABAD, 'M', 'Ꮭ'),
- (0xABAE, 'M', 'Ꮮ'),
- (0xABAF, 'M', 'Ꮯ'),
- (0xABB0, 'M', 'Ꮰ'),
- (0xABB1, 'M', 'Ꮱ'),
- (0xABB2, 'M', 'Ꮲ'),
- (0xABB3, 'M', 'Ꮳ'),
- (0xABB4, 'M', 'Ꮴ'),
- (0xABB5, 'M', 'Ꮵ'),
- (0xABB6, 'M', 'Ꮶ'),
- (0xABB7, 'M', 'Ꮷ'),
- (0xABB8, 'M', 'Ꮸ'),
- (0xABB9, 'M', 'Ꮹ'),
- (0xABBA, 'M', 'Ꮺ'),
- (0xABBB, 'M', 'Ꮻ'),
- (0xABBC, 'M', 'Ꮼ'),
- (0xABBD, 'M', 'Ꮽ'),
- (0xABBE, 'M', 'Ꮾ'),
- (0xABBF, 'M', 'Ꮿ'),
+ (0xAB7B, 'M', u'Ꭻ'),
+ (0xAB7C, 'M', u'Ꭼ'),
+ (0xAB7D, 'M', u'Ꭽ'),
+ (0xAB7E, 'M', u'Ꭾ'),
+ (0xAB7F, 'M', u'Ꭿ'),
+ (0xAB80, 'M', u'Ꮀ'),
+ (0xAB81, 'M', u'Ꮁ'),
+ (0xAB82, 'M', u'Ꮂ'),
+ (0xAB83, 'M', u'Ꮃ'),
+ (0xAB84, 'M', u'Ꮄ'),
+ (0xAB85, 'M', u'Ꮅ'),
+ (0xAB86, 'M', u'Ꮆ'),
+ (0xAB87, 'M', u'Ꮇ'),
+ (0xAB88, 'M', u'Ꮈ'),
+ (0xAB89, 'M', u'Ꮉ'),
+ (0xAB8A, 'M', u'Ꮊ'),
+ (0xAB8B, 'M', u'Ꮋ'),
+ (0xAB8C, 'M', u'Ꮌ'),
+ (0xAB8D, 'M', u'Ꮍ'),
+ (0xAB8E, 'M', u'Ꮎ'),
+ (0xAB8F, 'M', u'Ꮏ'),
+ (0xAB90, 'M', u'Ꮐ'),
+ (0xAB91, 'M', u'Ꮑ'),
+ (0xAB92, 'M', u'Ꮒ'),
+ (0xAB93, 'M', u'Ꮓ'),
+ (0xAB94, 'M', u'Ꮔ'),
+ (0xAB95, 'M', u'Ꮕ'),
+ (0xAB96, 'M', u'Ꮖ'),
+ (0xAB97, 'M', u'Ꮗ'),
+ (0xAB98, 'M', u'Ꮘ'),
+ (0xAB99, 'M', u'Ꮙ'),
+ (0xAB9A, 'M', u'Ꮚ'),
+ (0xAB9B, 'M', u'Ꮛ'),
+ (0xAB9C, 'M', u'Ꮜ'),
+ (0xAB9D, 'M', u'Ꮝ'),
+ (0xAB9E, 'M', u'Ꮞ'),
+ (0xAB9F, 'M', u'Ꮟ'),
+ (0xABA0, 'M', u'Ꮠ'),
+ (0xABA1, 'M', u'Ꮡ'),
+ (0xABA2, 'M', u'Ꮢ'),
+ (0xABA3, 'M', u'Ꮣ'),
+ (0xABA4, 'M', u'Ꮤ'),
+ (0xABA5, 'M', u'Ꮥ'),
+ (0xABA6, 'M', u'Ꮦ'),
+ (0xABA7, 'M', u'Ꮧ'),
+ (0xABA8, 'M', u'Ꮨ'),
+ (0xABA9, 'M', u'Ꮩ'),
+ (0xABAA, 'M', u'Ꮪ'),
+ (0xABAB, 'M', u'Ꮫ'),
+ (0xABAC, 'M', u'Ꮬ'),
+ (0xABAD, 'M', u'Ꮭ'),
+ (0xABAE, 'M', u'Ꮮ'),
+ (0xABAF, 'M', u'Ꮯ'),
+ (0xABB0, 'M', u'Ꮰ'),
+ (0xABB1, 'M', u'Ꮱ'),
+ (0xABB2, 'M', u'Ꮲ'),
+ (0xABB3, 'M', u'Ꮳ'),
+ (0xABB4, 'M', u'Ꮴ'),
+ (0xABB5, 'M', u'Ꮵ'),
+ (0xABB6, 'M', u'Ꮶ'),
+ (0xABB7, 'M', u'Ꮷ'),
+ (0xABB8, 'M', u'Ꮸ'),
+ (0xABB9, 'M', u'Ꮹ'),
+ (0xABBA, 'M', u'Ꮺ'),
+ (0xABBB, 'M', u'Ꮻ'),
+ (0xABBC, 'M', u'Ꮼ'),
+ (0xABBD, 'M', u'Ꮽ'),
+ (0xABBE, 'M', u'Ꮾ'),
+ (0xABBF, 'M', u'Ꮿ'),
(0xABC0, 'V'),
(0xABEE, 'X'),
(0xABF0, 'V'),
@@ -4097,1436 +4038,1436 @@ def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0xD7C7, 'X'),
(0xD7CB, 'V'),
(0xD7FC, 'X'),
- (0xF900, 'M', '豈'),
- (0xF901, 'M', '更'),
- (0xF902, 'M', '車'),
- (0xF903, 'M', '賈'),
- (0xF904, 'M', '滑'),
- (0xF905, 'M', '串'),
- (0xF906, 'M', '句'),
- (0xF907, 'M', '龜'),
- (0xF909, 'M', '契'),
- (0xF90A, 'M', '金'),
- (0xF90B, 'M', '喇'),
- (0xF90C, 'M', '奈'),
- (0xF90D, 'M', '懶'),
- (0xF90E, 'M', '癩'),
- (0xF90F, 'M', '羅'),
- (0xF910, 'M', '蘿'),
- (0xF911, 'M', '螺'),
- (0xF912, 'M', '裸'),
- (0xF913, 'M', '邏'),
- (0xF914, 'M', '樂'),
- (0xF915, 'M', '洛'),
- (0xF916, 'M', '烙'),
- (0xF917, 'M', '珞'),
- (0xF918, 'M', '落'),
- (0xF919, 'M', '酪'),
- (0xF91A, 'M', '駱'),
- (0xF91B, 'M', '亂'),
- (0xF91C, 'M', '卵'),
- (0xF91D, 'M', '欄'),
- (0xF91E, 'M', '爛'),
- (0xF91F, 'M', '蘭'),
- (0xF920, 'M', '鸞'),
- (0xF921, 'M', '嵐'),
- (0xF922, 'M', '濫'),
- (0xF923, 'M', '藍'),
- (0xF924, 'M', '襤'),
- (0xF925, 'M', '拉'),
- (0xF926, 'M', '臘'),
- (0xF927, 'M', '蠟'),
- (0xF928, 'M', '廊'),
- (0xF929, 'M', '朗'),
- (0xF92A, 'M', '浪'),
- (0xF92B, 'M', '狼'),
- (0xF92C, 'M', '郎'),
- (0xF92D, 'M', '來'),
- (0xF92E, 'M', '冷'),
- (0xF92F, 'M', '勞'),
- (0xF930, 'M', '擄'),
- (0xF931, 'M', '櫓'),
- (0xF932, 'M', '爐'),
- (0xF933, 'M', '盧'),
- (0xF934, 'M', '老'),
- (0xF935, 'M', '蘆'),
- (0xF936, 'M', '虜'),
- (0xF937, 'M', '路'),
- (0xF938, 'M', '露'),
- (0xF939, 'M', '魯'),
- (0xF93A, 'M', '鷺'),
- (0xF93B, 'M', '碌'),
- (0xF93C, 'M', '祿'),
- (0xF93D, 'M', '綠'),
- (0xF93E, 'M', '菉'),
- (0xF93F, 'M', '錄'),
- (0xF940, 'M', '鹿'),
- (0xF941, 'M', '論'),
- (0xF942, 'M', '壟'),
- (0xF943, 'M', '弄'),
- (0xF944, 'M', '籠'),
- (0xF945, 'M', '聾'),
+ (0xF900, 'M', u'豈'),
+ (0xF901, 'M', u'更'),
+ (0xF902, 'M', u'車'),
+ (0xF903, 'M', u'賈'),
+ (0xF904, 'M', u'滑'),
+ (0xF905, 'M', u'串'),
+ (0xF906, 'M', u'句'),
+ (0xF907, 'M', u'龜'),
+ (0xF909, 'M', u'契'),
+ (0xF90A, 'M', u'金'),
+ (0xF90B, 'M', u'喇'),
+ (0xF90C, 'M', u'奈'),
+ (0xF90D, 'M', u'懶'),
+ (0xF90E, 'M', u'癩'),
+ (0xF90F, 'M', u'羅'),
+ (0xF910, 'M', u'蘿'),
+ (0xF911, 'M', u'螺'),
+ (0xF912, 'M', u'裸'),
+ (0xF913, 'M', u'邏'),
+ (0xF914, 'M', u'樂'),
+ (0xF915, 'M', u'洛'),
]
-def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_39():
return [
- (0xF946, 'M', '牢'),
- (0xF947, 'M', '磊'),
- (0xF948, 'M', '賂'),
- (0xF949, 'M', '雷'),
- (0xF94A, 'M', '壘'),
- (0xF94B, 'M', '屢'),
- (0xF94C, 'M', '樓'),
- (0xF94D, 'M', '淚'),
- (0xF94E, 'M', '漏'),
- (0xF94F, 'M', '累'),
- (0xF950, 'M', '縷'),
- (0xF951, 'M', '陋'),
- (0xF952, 'M', '勒'),
- (0xF953, 'M', '肋'),
- (0xF954, 'M', '凜'),
- (0xF955, 'M', '凌'),
- (0xF956, 'M', '稜'),
- (0xF957, 'M', '綾'),
- (0xF958, 'M', '菱'),
- (0xF959, 'M', '陵'),
- (0xF95A, 'M', '讀'),
- (0xF95B, 'M', '拏'),
- (0xF95C, 'M', '樂'),
- (0xF95D, 'M', '諾'),
- (0xF95E, 'M', '丹'),
- (0xF95F, 'M', '寧'),
- (0xF960, 'M', '怒'),
- (0xF961, 'M', '率'),
- (0xF962, 'M', '異'),
- (0xF963, 'M', '北'),
- (0xF964, 'M', '磻'),
- (0xF965, 'M', '便'),
- (0xF966, 'M', '復'),
- (0xF967, 'M', '不'),
- (0xF968, 'M', '泌'),
- (0xF969, 'M', '數'),
- (0xF96A, 'M', '索'),
- (0xF96B, 'M', '參'),
- (0xF96C, 'M', '塞'),
- (0xF96D, 'M', '省'),
- (0xF96E, 'M', '葉'),
- (0xF96F, 'M', '說'),
- (0xF970, 'M', '殺'),
- (0xF971, 'M', '辰'),
- (0xF972, 'M', '沈'),
- (0xF973, 'M', '拾'),
- (0xF974, 'M', '若'),
- (0xF975, 'M', '掠'),
- (0xF976, 'M', '略'),
- (0xF977, 'M', '亮'),
- (0xF978, 'M', '兩'),
- (0xF979, 'M', '凉'),
- (0xF97A, 'M', '梁'),
- (0xF97B, 'M', '糧'),
- (0xF97C, 'M', '良'),
- (0xF97D, 'M', '諒'),
- (0xF97E, 'M', '量'),
- (0xF97F, 'M', '勵'),
- (0xF980, 'M', '呂'),
- (0xF981, 'M', '女'),
- (0xF982, 'M', '廬'),
- (0xF983, 'M', '旅'),
- (0xF984, 'M', '濾'),
- (0xF985, 'M', '礪'),
- (0xF986, 'M', '閭'),
- (0xF987, 'M', '驪'),
- (0xF988, 'M', '麗'),
- (0xF989, 'M', '黎'),
- (0xF98A, 'M', '力'),
- (0xF98B, 'M', '曆'),
- (0xF98C, 'M', '歷'),
- (0xF98D, 'M', '轢'),
- (0xF98E, 'M', '年'),
- (0xF98F, 'M', '憐'),
- (0xF990, 'M', '戀'),
- (0xF991, 'M', '撚'),
- (0xF992, 'M', '漣'),
- (0xF993, 'M', '煉'),
- (0xF994, 'M', '璉'),
- (0xF995, 'M', '秊'),
- (0xF996, 'M', '練'),
- (0xF997, 'M', '聯'),
- (0xF998, 'M', '輦'),
- (0xF999, 'M', '蓮'),
- (0xF99A, 'M', '連'),
- (0xF99B, 'M', '鍊'),
- (0xF99C, 'M', '列'),
- (0xF99D, 'M', '劣'),
- (0xF99E, 'M', '咽'),
- (0xF99F, 'M', '烈'),
- (0xF9A0, 'M', '裂'),
- (0xF9A1, 'M', '說'),
- (0xF9A2, 'M', '廉'),
- (0xF9A3, 'M', '念'),
- (0xF9A4, 'M', '捻'),
- (0xF9A5, 'M', '殮'),
- (0xF9A6, 'M', '簾'),
- (0xF9A7, 'M', '獵'),
- (0xF9A8, 'M', '令'),
- (0xF9A9, 'M', '囹'),
+ (0xF916, 'M', u'烙'),
+ (0xF917, 'M', u'珞'),
+ (0xF918, 'M', u'落'),
+ (0xF919, 'M', u'酪'),
+ (0xF91A, 'M', u'駱'),
+ (0xF91B, 'M', u'亂'),
+ (0xF91C, 'M', u'卵'),
+ (0xF91D, 'M', u'欄'),
+ (0xF91E, 'M', u'爛'),
+ (0xF91F, 'M', u'蘭'),
+ (0xF920, 'M', u'鸞'),
+ (0xF921, 'M', u'嵐'),
+ (0xF922, 'M', u'濫'),
+ (0xF923, 'M', u'藍'),
+ (0xF924, 'M', u'襤'),
+ (0xF925, 'M', u'拉'),
+ (0xF926, 'M', u'臘'),
+ (0xF927, 'M', u'蠟'),
+ (0xF928, 'M', u'廊'),
+ (0xF929, 'M', u'朗'),
+ (0xF92A, 'M', u'浪'),
+ (0xF92B, 'M', u'狼'),
+ (0xF92C, 'M', u'郎'),
+ (0xF92D, 'M', u'來'),
+ (0xF92E, 'M', u'冷'),
+ (0xF92F, 'M', u'勞'),
+ (0xF930, 'M', u'擄'),
+ (0xF931, 'M', u'櫓'),
+ (0xF932, 'M', u'爐'),
+ (0xF933, 'M', u'盧'),
+ (0xF934, 'M', u'老'),
+ (0xF935, 'M', u'蘆'),
+ (0xF936, 'M', u'虜'),
+ (0xF937, 'M', u'路'),
+ (0xF938, 'M', u'露'),
+ (0xF939, 'M', u'魯'),
+ (0xF93A, 'M', u'鷺'),
+ (0xF93B, 'M', u'碌'),
+ (0xF93C, 'M', u'祿'),
+ (0xF93D, 'M', u'綠'),
+ (0xF93E, 'M', u'菉'),
+ (0xF93F, 'M', u'錄'),
+ (0xF940, 'M', u'鹿'),
+ (0xF941, 'M', u'論'),
+ (0xF942, 'M', u'壟'),
+ (0xF943, 'M', u'弄'),
+ (0xF944, 'M', u'籠'),
+ (0xF945, 'M', u'聾'),
+ (0xF946, 'M', u'牢'),
+ (0xF947, 'M', u'磊'),
+ (0xF948, 'M', u'賂'),
+ (0xF949, 'M', u'雷'),
+ (0xF94A, 'M', u'壘'),
+ (0xF94B, 'M', u'屢'),
+ (0xF94C, 'M', u'樓'),
+ (0xF94D, 'M', u'淚'),
+ (0xF94E, 'M', u'漏'),
+ (0xF94F, 'M', u'累'),
+ (0xF950, 'M', u'縷'),
+ (0xF951, 'M', u'陋'),
+ (0xF952, 'M', u'勒'),
+ (0xF953, 'M', u'肋'),
+ (0xF954, 'M', u'凜'),
+ (0xF955, 'M', u'凌'),
+ (0xF956, 'M', u'稜'),
+ (0xF957, 'M', u'綾'),
+ (0xF958, 'M', u'菱'),
+ (0xF959, 'M', u'陵'),
+ (0xF95A, 'M', u'讀'),
+ (0xF95B, 'M', u'拏'),
+ (0xF95C, 'M', u'樂'),
+ (0xF95D, 'M', u'諾'),
+ (0xF95E, 'M', u'丹'),
+ (0xF95F, 'M', u'寧'),
+ (0xF960, 'M', u'怒'),
+ (0xF961, 'M', u'率'),
+ (0xF962, 'M', u'異'),
+ (0xF963, 'M', u'北'),
+ (0xF964, 'M', u'磻'),
+ (0xF965, 'M', u'便'),
+ (0xF966, 'M', u'復'),
+ (0xF967, 'M', u'不'),
+ (0xF968, 'M', u'泌'),
+ (0xF969, 'M', u'數'),
+ (0xF96A, 'M', u'索'),
+ (0xF96B, 'M', u'參'),
+ (0xF96C, 'M', u'塞'),
+ (0xF96D, 'M', u'省'),
+ (0xF96E, 'M', u'葉'),
+ (0xF96F, 'M', u'說'),
+ (0xF970, 'M', u'殺'),
+ (0xF971, 'M', u'辰'),
+ (0xF972, 'M', u'沈'),
+ (0xF973, 'M', u'拾'),
+ (0xF974, 'M', u'若'),
+ (0xF975, 'M', u'掠'),
+ (0xF976, 'M', u'略'),
+ (0xF977, 'M', u'亮'),
+ (0xF978, 'M', u'兩'),
+ (0xF979, 'M', u'凉'),
]
-def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_40():
return [
- (0xF9AA, 'M', '寧'),
- (0xF9AB, 'M', '嶺'),
- (0xF9AC, 'M', '怜'),
- (0xF9AD, 'M', '玲'),
- (0xF9AE, 'M', '瑩'),
- (0xF9AF, 'M', '羚'),
- (0xF9B0, 'M', '聆'),
- (0xF9B1, 'M', '鈴'),
- (0xF9B2, 'M', '零'),
- (0xF9B3, 'M', '靈'),
- (0xF9B4, 'M', '領'),
- (0xF9B5, 'M', '例'),
- (0xF9B6, 'M', '禮'),
- (0xF9B7, 'M', '醴'),
- (0xF9B8, 'M', '隸'),
- (0xF9B9, 'M', '惡'),
- (0xF9BA, 'M', '了'),
- (0xF9BB, 'M', '僚'),
- (0xF9BC, 'M', '寮'),
- (0xF9BD, 'M', '尿'),
- (0xF9BE, 'M', '料'),
- (0xF9BF, 'M', '樂'),
- (0xF9C0, 'M', '燎'),
- (0xF9C1, 'M', '療'),
- (0xF9C2, 'M', '蓼'),
- (0xF9C3, 'M', '遼'),
- (0xF9C4, 'M', '龍'),
- (0xF9C5, 'M', '暈'),
- (0xF9C6, 'M', '阮'),
- (0xF9C7, 'M', '劉'),
- (0xF9C8, 'M', '杻'),
- (0xF9C9, 'M', '柳'),
- (0xF9CA, 'M', '流'),
- (0xF9CB, 'M', '溜'),
- (0xF9CC, 'M', '琉'),
- (0xF9CD, 'M', '留'),
- (0xF9CE, 'M', '硫'),
- (0xF9CF, 'M', '紐'),
- (0xF9D0, 'M', '類'),
- (0xF9D1, 'M', '六'),
- (0xF9D2, 'M', '戮'),
- (0xF9D3, 'M', '陸'),
- (0xF9D4, 'M', '倫'),
- (0xF9D5, 'M', '崙'),
- (0xF9D6, 'M', '淪'),
- (0xF9D7, 'M', '輪'),
- (0xF9D8, 'M', '律'),
- (0xF9D9, 'M', '慄'),
- (0xF9DA, 'M', '栗'),
- (0xF9DB, 'M', '率'),
- (0xF9DC, 'M', '隆'),
- (0xF9DD, 'M', '利'),
- (0xF9DE, 'M', '吏'),
- (0xF9DF, 'M', '履'),
- (0xF9E0, 'M', '易'),
- (0xF9E1, 'M', '李'),
- (0xF9E2, 'M', '梨'),
- (0xF9E3, 'M', '泥'),
- (0xF9E4, 'M', '理'),
- (0xF9E5, 'M', '痢'),
- (0xF9E6, 'M', '罹'),
- (0xF9E7, 'M', '裏'),
- (0xF9E8, 'M', '裡'),
- (0xF9E9, 'M', '里'),
- (0xF9EA, 'M', '離'),
- (0xF9EB, 'M', '匿'),
- (0xF9EC, 'M', '溺'),
- (0xF9ED, 'M', '吝'),
- (0xF9EE, 'M', '燐'),
- (0xF9EF, 'M', '璘'),
- (0xF9F0, 'M', '藺'),
- (0xF9F1, 'M', '隣'),
- (0xF9F2, 'M', '鱗'),
- (0xF9F3, 'M', '麟'),
- (0xF9F4, 'M', '林'),
- (0xF9F5, 'M', '淋'),
- (0xF9F6, 'M', '臨'),
- (0xF9F7, 'M', '立'),
- (0xF9F8, 'M', '笠'),
- (0xF9F9, 'M', '粒'),
- (0xF9FA, 'M', '狀'),
- (0xF9FB, 'M', '炙'),
- (0xF9FC, 'M', '識'),
- (0xF9FD, 'M', '什'),
- (0xF9FE, 'M', '茶'),
- (0xF9FF, 'M', '刺'),
- (0xFA00, 'M', '切'),
- (0xFA01, 'M', '度'),
- (0xFA02, 'M', '拓'),
- (0xFA03, 'M', '糖'),
- (0xFA04, 'M', '宅'),
- (0xFA05, 'M', '洞'),
- (0xFA06, 'M', '暴'),
- (0xFA07, 'M', '輻'),
- (0xFA08, 'M', '行'),
- (0xFA09, 'M', '降'),
- (0xFA0A, 'M', '見'),
- (0xFA0B, 'M', '廓'),
- (0xFA0C, 'M', '兀'),
- (0xFA0D, 'M', '嗀'),
+ (0xF97A, 'M', u'梁'),
+ (0xF97B, 'M', u'糧'),
+ (0xF97C, 'M', u'良'),
+ (0xF97D, 'M', u'諒'),
+ (0xF97E, 'M', u'量'),
+ (0xF97F, 'M', u'勵'),
+ (0xF980, 'M', u'呂'),
+ (0xF981, 'M', u'女'),
+ (0xF982, 'M', u'廬'),
+ (0xF983, 'M', u'旅'),
+ (0xF984, 'M', u'濾'),
+ (0xF985, 'M', u'礪'),
+ (0xF986, 'M', u'閭'),
+ (0xF987, 'M', u'驪'),
+ (0xF988, 'M', u'麗'),
+ (0xF989, 'M', u'黎'),
+ (0xF98A, 'M', u'力'),
+ (0xF98B, 'M', u'曆'),
+ (0xF98C, 'M', u'歷'),
+ (0xF98D, 'M', u'轢'),
+ (0xF98E, 'M', u'年'),
+ (0xF98F, 'M', u'憐'),
+ (0xF990, 'M', u'戀'),
+ (0xF991, 'M', u'撚'),
+ (0xF992, 'M', u'漣'),
+ (0xF993, 'M', u'煉'),
+ (0xF994, 'M', u'璉'),
+ (0xF995, 'M', u'秊'),
+ (0xF996, 'M', u'練'),
+ (0xF997, 'M', u'聯'),
+ (0xF998, 'M', u'輦'),
+ (0xF999, 'M', u'蓮'),
+ (0xF99A, 'M', u'連'),
+ (0xF99B, 'M', u'鍊'),
+ (0xF99C, 'M', u'列'),
+ (0xF99D, 'M', u'劣'),
+ (0xF99E, 'M', u'咽'),
+ (0xF99F, 'M', u'烈'),
+ (0xF9A0, 'M', u'裂'),
+ (0xF9A1, 'M', u'說'),
+ (0xF9A2, 'M', u'廉'),
+ (0xF9A3, 'M', u'念'),
+ (0xF9A4, 'M', u'捻'),
+ (0xF9A5, 'M', u'殮'),
+ (0xF9A6, 'M', u'簾'),
+ (0xF9A7, 'M', u'獵'),
+ (0xF9A8, 'M', u'令'),
+ (0xF9A9, 'M', u'囹'),
+ (0xF9AA, 'M', u'寧'),
+ (0xF9AB, 'M', u'嶺'),
+ (0xF9AC, 'M', u'怜'),
+ (0xF9AD, 'M', u'玲'),
+ (0xF9AE, 'M', u'瑩'),
+ (0xF9AF, 'M', u'羚'),
+ (0xF9B0, 'M', u'聆'),
+ (0xF9B1, 'M', u'鈴'),
+ (0xF9B2, 'M', u'零'),
+ (0xF9B3, 'M', u'靈'),
+ (0xF9B4, 'M', u'領'),
+ (0xF9B5, 'M', u'例'),
+ (0xF9B6, 'M', u'禮'),
+ (0xF9B7, 'M', u'醴'),
+ (0xF9B8, 'M', u'隸'),
+ (0xF9B9, 'M', u'惡'),
+ (0xF9BA, 'M', u'了'),
+ (0xF9BB, 'M', u'僚'),
+ (0xF9BC, 'M', u'寮'),
+ (0xF9BD, 'M', u'尿'),
+ (0xF9BE, 'M', u'料'),
+ (0xF9BF, 'M', u'樂'),
+ (0xF9C0, 'M', u'燎'),
+ (0xF9C1, 'M', u'療'),
+ (0xF9C2, 'M', u'蓼'),
+ (0xF9C3, 'M', u'遼'),
+ (0xF9C4, 'M', u'龍'),
+ (0xF9C5, 'M', u'暈'),
+ (0xF9C6, 'M', u'阮'),
+ (0xF9C7, 'M', u'劉'),
+ (0xF9C8, 'M', u'杻'),
+ (0xF9C9, 'M', u'柳'),
+ (0xF9CA, 'M', u'流'),
+ (0xF9CB, 'M', u'溜'),
+ (0xF9CC, 'M', u'琉'),
+ (0xF9CD, 'M', u'留'),
+ (0xF9CE, 'M', u'硫'),
+ (0xF9CF, 'M', u'紐'),
+ (0xF9D0, 'M', u'類'),
+ (0xF9D1, 'M', u'六'),
+ (0xF9D2, 'M', u'戮'),
+ (0xF9D3, 'M', u'陸'),
+ (0xF9D4, 'M', u'倫'),
+ (0xF9D5, 'M', u'崙'),
+ (0xF9D6, 'M', u'淪'),
+ (0xF9D7, 'M', u'輪'),
+ (0xF9D8, 'M', u'律'),
+ (0xF9D9, 'M', u'慄'),
+ (0xF9DA, 'M', u'栗'),
+ (0xF9DB, 'M', u'率'),
+ (0xF9DC, 'M', u'隆'),
+ (0xF9DD, 'M', u'利'),
]
-def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_41():
return [
+ (0xF9DE, 'M', u'吏'),
+ (0xF9DF, 'M', u'履'),
+ (0xF9E0, 'M', u'易'),
+ (0xF9E1, 'M', u'李'),
+ (0xF9E2, 'M', u'梨'),
+ (0xF9E3, 'M', u'泥'),
+ (0xF9E4, 'M', u'理'),
+ (0xF9E5, 'M', u'痢'),
+ (0xF9E6, 'M', u'罹'),
+ (0xF9E7, 'M', u'裏'),
+ (0xF9E8, 'M', u'裡'),
+ (0xF9E9, 'M', u'里'),
+ (0xF9EA, 'M', u'離'),
+ (0xF9EB, 'M', u'匿'),
+ (0xF9EC, 'M', u'溺'),
+ (0xF9ED, 'M', u'吝'),
+ (0xF9EE, 'M', u'燐'),
+ (0xF9EF, 'M', u'璘'),
+ (0xF9F0, 'M', u'藺'),
+ (0xF9F1, 'M', u'隣'),
+ (0xF9F2, 'M', u'鱗'),
+ (0xF9F3, 'M', u'麟'),
+ (0xF9F4, 'M', u'林'),
+ (0xF9F5, 'M', u'淋'),
+ (0xF9F6, 'M', u'臨'),
+ (0xF9F7, 'M', u'立'),
+ (0xF9F8, 'M', u'笠'),
+ (0xF9F9, 'M', u'粒'),
+ (0xF9FA, 'M', u'狀'),
+ (0xF9FB, 'M', u'炙'),
+ (0xF9FC, 'M', u'識'),
+ (0xF9FD, 'M', u'什'),
+ (0xF9FE, 'M', u'茶'),
+ (0xF9FF, 'M', u'刺'),
+ (0xFA00, 'M', u'切'),
+ (0xFA01, 'M', u'度'),
+ (0xFA02, 'M', u'拓'),
+ (0xFA03, 'M', u'糖'),
+ (0xFA04, 'M', u'宅'),
+ (0xFA05, 'M', u'洞'),
+ (0xFA06, 'M', u'暴'),
+ (0xFA07, 'M', u'輻'),
+ (0xFA08, 'M', u'行'),
+ (0xFA09, 'M', u'降'),
+ (0xFA0A, 'M', u'見'),
+ (0xFA0B, 'M', u'廓'),
+ (0xFA0C, 'M', u'兀'),
+ (0xFA0D, 'M', u'嗀'),
(0xFA0E, 'V'),
- (0xFA10, 'M', '塚'),
+ (0xFA10, 'M', u'塚'),
(0xFA11, 'V'),
- (0xFA12, 'M', '晴'),
+ (0xFA12, 'M', u'晴'),
(0xFA13, 'V'),
- (0xFA15, 'M', '凞'),
- (0xFA16, 'M', '猪'),
- (0xFA17, 'M', '益'),
- (0xFA18, 'M', '礼'),
- (0xFA19, 'M', '神'),
- (0xFA1A, 'M', '祥'),
- (0xFA1B, 'M', '福'),
- (0xFA1C, 'M', '靖'),
- (0xFA1D, 'M', '精'),
- (0xFA1E, 'M', '羽'),
+ (0xFA15, 'M', u'凞'),
+ (0xFA16, 'M', u'猪'),
+ (0xFA17, 'M', u'益'),
+ (0xFA18, 'M', u'礼'),
+ (0xFA19, 'M', u'神'),
+ (0xFA1A, 'M', u'祥'),
+ (0xFA1B, 'M', u'福'),
+ (0xFA1C, 'M', u'靖'),
+ (0xFA1D, 'M', u'精'),
+ (0xFA1E, 'M', u'羽'),
(0xFA1F, 'V'),
- (0xFA20, 'M', '蘒'),
+ (0xFA20, 'M', u'蘒'),
(0xFA21, 'V'),
- (0xFA22, 'M', '諸'),
+ (0xFA22, 'M', u'諸'),
(0xFA23, 'V'),
- (0xFA25, 'M', '逸'),
- (0xFA26, 'M', '都'),
+ (0xFA25, 'M', u'逸'),
+ (0xFA26, 'M', u'都'),
(0xFA27, 'V'),
- (0xFA2A, 'M', '飯'),
- (0xFA2B, 'M', '飼'),
- (0xFA2C, 'M', '館'),
- (0xFA2D, 'M', '鶴'),
- (0xFA2E, 'M', '郞'),
- (0xFA2F, 'M', '隷'),
- (0xFA30, 'M', '侮'),
- (0xFA31, 'M', '僧'),
- (0xFA32, 'M', '免'),
- (0xFA33, 'M', '勉'),
- (0xFA34, 'M', '勤'),
- (0xFA35, 'M', '卑'),
- (0xFA36, 'M', '喝'),
- (0xFA37, 'M', '嘆'),
- (0xFA38, 'M', '器'),
- (0xFA39, 'M', '塀'),
- (0xFA3A, 'M', '墨'),
- (0xFA3B, 'M', '層'),
- (0xFA3C, 'M', '屮'),
- (0xFA3D, 'M', '悔'),
- (0xFA3E, 'M', '慨'),
- (0xFA3F, 'M', '憎'),
- (0xFA40, 'M', '懲'),
- (0xFA41, 'M', '敏'),
- (0xFA42, 'M', '既'),
- (0xFA43, 'M', '暑'),
- (0xFA44, 'M', '梅'),
- (0xFA45, 'M', '海'),
- (0xFA46, 'M', '渚'),
- (0xFA47, 'M', '漢'),
- (0xFA48, 'M', '煮'),
- (0xFA49, 'M', '爫'),
- (0xFA4A, 'M', '琢'),
- (0xFA4B, 'M', '碑'),
- (0xFA4C, 'M', '社'),
- (0xFA4D, 'M', '祉'),
- (0xFA4E, 'M', '祈'),
- (0xFA4F, 'M', '祐'),
- (0xFA50, 'M', '祖'),
- (0xFA51, 'M', '祝'),
- (0xFA52, 'M', '禍'),
- (0xFA53, 'M', '禎'),
- (0xFA54, 'M', '穀'),
- (0xFA55, 'M', '突'),
- (0xFA56, 'M', '節'),
- (0xFA57, 'M', '練'),
- (0xFA58, 'M', '縉'),
- (0xFA59, 'M', '繁'),
- (0xFA5A, 'M', '署'),
- (0xFA5B, 'M', '者'),
- (0xFA5C, 'M', '臭'),
- (0xFA5D, 'M', '艹'),
- (0xFA5F, 'M', '著'),
- (0xFA60, 'M', '褐'),
- (0xFA61, 'M', '視'),
- (0xFA62, 'M', '謁'),
- (0xFA63, 'M', '謹'),
- (0xFA64, 'M', '賓'),
- (0xFA65, 'M', '贈'),
- (0xFA66, 'M', '辶'),
- (0xFA67, 'M', '逸'),
- (0xFA68, 'M', '難'),
- (0xFA69, 'M', '響'),
- (0xFA6A, 'M', '頻'),
- (0xFA6B, 'M', '恵'),
- (0xFA6C, 'M', '𤋮'),
- (0xFA6D, 'M', '舘'),
- (0xFA6E, 'X'),
- (0xFA70, 'M', '並'),
- (0xFA71, 'M', '况'),
- (0xFA72, 'M', '全'),
- (0xFA73, 'M', '侀'),
- (0xFA74, 'M', '充'),
- (0xFA75, 'M', '冀'),
- (0xFA76, 'M', '勇'),
- (0xFA77, 'M', '勺'),
- (0xFA78, 'M', '喝'),
+ (0xFA2A, 'M', u'飯'),
+ (0xFA2B, 'M', u'飼'),
+ (0xFA2C, 'M', u'館'),
+ (0xFA2D, 'M', u'鶴'),
+ (0xFA2E, 'M', u'郞'),
+ (0xFA2F, 'M', u'隷'),
+ (0xFA30, 'M', u'侮'),
+ (0xFA31, 'M', u'僧'),
+ (0xFA32, 'M', u'免'),
+ (0xFA33, 'M', u'勉'),
+ (0xFA34, 'M', u'勤'),
+ (0xFA35, 'M', u'卑'),
+ (0xFA36, 'M', u'喝'),
+ (0xFA37, 'M', u'嘆'),
+ (0xFA38, 'M', u'器'),
+ (0xFA39, 'M', u'塀'),
+ (0xFA3A, 'M', u'墨'),
+ (0xFA3B, 'M', u'層'),
+ (0xFA3C, 'M', u'屮'),
+ (0xFA3D, 'M', u'悔'),
+ (0xFA3E, 'M', u'慨'),
+ (0xFA3F, 'M', u'憎'),
+ (0xFA40, 'M', u'懲'),
+ (0xFA41, 'M', u'敏'),
+ (0xFA42, 'M', u'既'),
+ (0xFA43, 'M', u'暑'),
+ (0xFA44, 'M', u'梅'),
+ (0xFA45, 'M', u'海'),
+ (0xFA46, 'M', u'渚'),
]
-def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_42():
return [
- (0xFA79, 'M', '啕'),
- (0xFA7A, 'M', '喙'),
- (0xFA7B, 'M', '嗢'),
- (0xFA7C, 'M', '塚'),
- (0xFA7D, 'M', '墳'),
- (0xFA7E, 'M', '奄'),
- (0xFA7F, 'M', '奔'),
- (0xFA80, 'M', '婢'),
- (0xFA81, 'M', '嬨'),
- (0xFA82, 'M', '廒'),
- (0xFA83, 'M', '廙'),
- (0xFA84, 'M', '彩'),
- (0xFA85, 'M', '徭'),
- (0xFA86, 'M', '惘'),
- (0xFA87, 'M', '慎'),
- (0xFA88, 'M', '愈'),
- (0xFA89, 'M', '憎'),
- (0xFA8A, 'M', '慠'),
- (0xFA8B, 'M', '懲'),
- (0xFA8C, 'M', '戴'),
- (0xFA8D, 'M', '揄'),
- (0xFA8E, 'M', '搜'),
- (0xFA8F, 'M', '摒'),
- (0xFA90, 'M', '敖'),
- (0xFA91, 'M', '晴'),
- (0xFA92, 'M', '朗'),
- (0xFA93, 'M', '望'),
- (0xFA94, 'M', '杖'),
- (0xFA95, 'M', '歹'),
- (0xFA96, 'M', '殺'),
- (0xFA97, 'M', '流'),
- (0xFA98, 'M', '滛'),
- (0xFA99, 'M', '滋'),
- (0xFA9A, 'M', '漢'),
- (0xFA9B, 'M', '瀞'),
- (0xFA9C, 'M', '煮'),
- (0xFA9D, 'M', '瞧'),
- (0xFA9E, 'M', '爵'),
- (0xFA9F, 'M', '犯'),
- (0xFAA0, 'M', '猪'),
- (0xFAA1, 'M', '瑱'),
- (0xFAA2, 'M', '甆'),
- (0xFAA3, 'M', '画'),
- (0xFAA4, 'M', '瘝'),
- (0xFAA5, 'M', '瘟'),
- (0xFAA6, 'M', '益'),
- (0xFAA7, 'M', '盛'),
- (0xFAA8, 'M', '直'),
- (0xFAA9, 'M', '睊'),
- (0xFAAA, 'M', '着'),
- (0xFAAB, 'M', '磌'),
- (0xFAAC, 'M', '窱'),
- (0xFAAD, 'M', '節'),
- (0xFAAE, 'M', '类'),
- (0xFAAF, 'M', '絛'),
- (0xFAB0, 'M', '練'),
- (0xFAB1, 'M', '缾'),
- (0xFAB2, 'M', '者'),
- (0xFAB3, 'M', '荒'),
- (0xFAB4, 'M', '華'),
- (0xFAB5, 'M', '蝹'),
- (0xFAB6, 'M', '襁'),
- (0xFAB7, 'M', '覆'),
- (0xFAB8, 'M', '視'),
- (0xFAB9, 'M', '調'),
- (0xFABA, 'M', '諸'),
- (0xFABB, 'M', '請'),
- (0xFABC, 'M', '謁'),
- (0xFABD, 'M', '諾'),
- (0xFABE, 'M', '諭'),
- (0xFABF, 'M', '謹'),
- (0xFAC0, 'M', '變'),
- (0xFAC1, 'M', '贈'),
- (0xFAC2, 'M', '輸'),
- (0xFAC3, 'M', '遲'),
- (0xFAC4, 'M', '醙'),
- (0xFAC5, 'M', '鉶'),
- (0xFAC6, 'M', '陼'),
- (0xFAC7, 'M', '難'),
- (0xFAC8, 'M', '靖'),
- (0xFAC9, 'M', '韛'),
- (0xFACA, 'M', '響'),
- (0xFACB, 'M', '頋'),
- (0xFACC, 'M', '頻'),
- (0xFACD, 'M', '鬒'),
- (0xFACE, 'M', '龜'),
- (0xFACF, 'M', '𢡊'),
- (0xFAD0, 'M', '𢡄'),
- (0xFAD1, 'M', '𣏕'),
- (0xFAD2, 'M', '㮝'),
- (0xFAD3, 'M', '䀘'),
- (0xFAD4, 'M', '䀹'),
- (0xFAD5, 'M', '𥉉'),
- (0xFAD6, 'M', '𥳐'),
- (0xFAD7, 'M', '𧻓'),
- (0xFAD8, 'M', '齃'),
- (0xFAD9, 'M', '龎'),
- (0xFADA, 'X'),
- (0xFB00, 'M', 'ff'),
- (0xFB01, 'M', 'fi'),
+ (0xFA47, 'M', u'漢'),
+ (0xFA48, 'M', u'煮'),
+ (0xFA49, 'M', u'爫'),
+ (0xFA4A, 'M', u'琢'),
+ (0xFA4B, 'M', u'碑'),
+ (0xFA4C, 'M', u'社'),
+ (0xFA4D, 'M', u'祉'),
+ (0xFA4E, 'M', u'祈'),
+ (0xFA4F, 'M', u'祐'),
+ (0xFA50, 'M', u'祖'),
+ (0xFA51, 'M', u'祝'),
+ (0xFA52, 'M', u'禍'),
+ (0xFA53, 'M', u'禎'),
+ (0xFA54, 'M', u'穀'),
+ (0xFA55, 'M', u'突'),
+ (0xFA56, 'M', u'節'),
+ (0xFA57, 'M', u'練'),
+ (0xFA58, 'M', u'縉'),
+ (0xFA59, 'M', u'繁'),
+ (0xFA5A, 'M', u'署'),
+ (0xFA5B, 'M', u'者'),
+ (0xFA5C, 'M', u'臭'),
+ (0xFA5D, 'M', u'艹'),
+ (0xFA5F, 'M', u'著'),
+ (0xFA60, 'M', u'褐'),
+ (0xFA61, 'M', u'視'),
+ (0xFA62, 'M', u'謁'),
+ (0xFA63, 'M', u'謹'),
+ (0xFA64, 'M', u'賓'),
+ (0xFA65, 'M', u'贈'),
+ (0xFA66, 'M', u'辶'),
+ (0xFA67, 'M', u'逸'),
+ (0xFA68, 'M', u'難'),
+ (0xFA69, 'M', u'響'),
+ (0xFA6A, 'M', u'頻'),
+ (0xFA6B, 'M', u'恵'),
+ (0xFA6C, 'M', u'𤋮'),
+ (0xFA6D, 'M', u'舘'),
+ (0xFA6E, 'X'),
+ (0xFA70, 'M', u'並'),
+ (0xFA71, 'M', u'况'),
+ (0xFA72, 'M', u'全'),
+ (0xFA73, 'M', u'侀'),
+ (0xFA74, 'M', u'充'),
+ (0xFA75, 'M', u'冀'),
+ (0xFA76, 'M', u'勇'),
+ (0xFA77, 'M', u'勺'),
+ (0xFA78, 'M', u'喝'),
+ (0xFA79, 'M', u'啕'),
+ (0xFA7A, 'M', u'喙'),
+ (0xFA7B, 'M', u'嗢'),
+ (0xFA7C, 'M', u'塚'),
+ (0xFA7D, 'M', u'墳'),
+ (0xFA7E, 'M', u'奄'),
+ (0xFA7F, 'M', u'奔'),
+ (0xFA80, 'M', u'婢'),
+ (0xFA81, 'M', u'嬨'),
+ (0xFA82, 'M', u'廒'),
+ (0xFA83, 'M', u'廙'),
+ (0xFA84, 'M', u'彩'),
+ (0xFA85, 'M', u'徭'),
+ (0xFA86, 'M', u'惘'),
+ (0xFA87, 'M', u'慎'),
+ (0xFA88, 'M', u'愈'),
+ (0xFA89, 'M', u'憎'),
+ (0xFA8A, 'M', u'慠'),
+ (0xFA8B, 'M', u'懲'),
+ (0xFA8C, 'M', u'戴'),
+ (0xFA8D, 'M', u'揄'),
+ (0xFA8E, 'M', u'搜'),
+ (0xFA8F, 'M', u'摒'),
+ (0xFA90, 'M', u'敖'),
+ (0xFA91, 'M', u'晴'),
+ (0xFA92, 'M', u'朗'),
+ (0xFA93, 'M', u'望'),
+ (0xFA94, 'M', u'杖'),
+ (0xFA95, 'M', u'歹'),
+ (0xFA96, 'M', u'殺'),
+ (0xFA97, 'M', u'流'),
+ (0xFA98, 'M', u'滛'),
+ (0xFA99, 'M', u'滋'),
+ (0xFA9A, 'M', u'漢'),
+ (0xFA9B, 'M', u'瀞'),
+ (0xFA9C, 'M', u'煮'),
+ (0xFA9D, 'M', u'瞧'),
+ (0xFA9E, 'M', u'爵'),
+ (0xFA9F, 'M', u'犯'),
+ (0xFAA0, 'M', u'猪'),
+ (0xFAA1, 'M', u'瑱'),
+ (0xFAA2, 'M', u'甆'),
+ (0xFAA3, 'M', u'画'),
+ (0xFAA4, 'M', u'瘝'),
+ (0xFAA5, 'M', u'瘟'),
+ (0xFAA6, 'M', u'益'),
+ (0xFAA7, 'M', u'盛'),
+ (0xFAA8, 'M', u'直'),
+ (0xFAA9, 'M', u'睊'),
+ (0xFAAA, 'M', u'着'),
+ (0xFAAB, 'M', u'磌'),
+ (0xFAAC, 'M', u'窱'),
]
-def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_43():
return [
- (0xFB02, 'M', 'fl'),
- (0xFB03, 'M', 'ffi'),
- (0xFB04, 'M', 'ffl'),
- (0xFB05, 'M', 'st'),
+ (0xFAAD, 'M', u'節'),
+ (0xFAAE, 'M', u'类'),
+ (0xFAAF, 'M', u'絛'),
+ (0xFAB0, 'M', u'練'),
+ (0xFAB1, 'M', u'缾'),
+ (0xFAB2, 'M', u'者'),
+ (0xFAB3, 'M', u'荒'),
+ (0xFAB4, 'M', u'華'),
+ (0xFAB5, 'M', u'蝹'),
+ (0xFAB6, 'M', u'襁'),
+ (0xFAB7, 'M', u'覆'),
+ (0xFAB8, 'M', u'視'),
+ (0xFAB9, 'M', u'調'),
+ (0xFABA, 'M', u'諸'),
+ (0xFABB, 'M', u'請'),
+ (0xFABC, 'M', u'謁'),
+ (0xFABD, 'M', u'諾'),
+ (0xFABE, 'M', u'諭'),
+ (0xFABF, 'M', u'謹'),
+ (0xFAC0, 'M', u'變'),
+ (0xFAC1, 'M', u'贈'),
+ (0xFAC2, 'M', u'輸'),
+ (0xFAC3, 'M', u'遲'),
+ (0xFAC4, 'M', u'醙'),
+ (0xFAC5, 'M', u'鉶'),
+ (0xFAC6, 'M', u'陼'),
+ (0xFAC7, 'M', u'難'),
+ (0xFAC8, 'M', u'靖'),
+ (0xFAC9, 'M', u'韛'),
+ (0xFACA, 'M', u'響'),
+ (0xFACB, 'M', u'頋'),
+ (0xFACC, 'M', u'頻'),
+ (0xFACD, 'M', u'鬒'),
+ (0xFACE, 'M', u'龜'),
+ (0xFACF, 'M', u'𢡊'),
+ (0xFAD0, 'M', u'𢡄'),
+ (0xFAD1, 'M', u'𣏕'),
+ (0xFAD2, 'M', u'㮝'),
+ (0xFAD3, 'M', u'䀘'),
+ (0xFAD4, 'M', u'䀹'),
+ (0xFAD5, 'M', u'𥉉'),
+ (0xFAD6, 'M', u'𥳐'),
+ (0xFAD7, 'M', u'𧻓'),
+ (0xFAD8, 'M', u'齃'),
+ (0xFAD9, 'M', u'龎'),
+ (0xFADA, 'X'),
+ (0xFB00, 'M', u'ff'),
+ (0xFB01, 'M', u'fi'),
+ (0xFB02, 'M', u'fl'),
+ (0xFB03, 'M', u'ffi'),
+ (0xFB04, 'M', u'ffl'),
+ (0xFB05, 'M', u'st'),
(0xFB07, 'X'),
- (0xFB13, 'M', 'մն'),
- (0xFB14, 'M', 'մե'),
- (0xFB15, 'M', 'մի'),
- (0xFB16, 'M', 'վն'),
- (0xFB17, 'M', 'մխ'),
+ (0xFB13, 'M', u'մն'),
+ (0xFB14, 'M', u'մե'),
+ (0xFB15, 'M', u'մի'),
+ (0xFB16, 'M', u'վն'),
+ (0xFB17, 'M', u'մխ'),
(0xFB18, 'X'),
- (0xFB1D, 'M', 'יִ'),
+ (0xFB1D, 'M', u'יִ'),
(0xFB1E, 'V'),
- (0xFB1F, 'M', 'ײַ'),
- (0xFB20, 'M', 'ע'),
- (0xFB21, 'M', 'א'),
- (0xFB22, 'M', 'ד'),
- (0xFB23, 'M', 'ה'),
- (0xFB24, 'M', 'כ'),
- (0xFB25, 'M', 'ל'),
- (0xFB26, 'M', 'ם'),
- (0xFB27, 'M', 'ר'),
- (0xFB28, 'M', 'ת'),
- (0xFB29, '3', '+'),
- (0xFB2A, 'M', 'שׁ'),
- (0xFB2B, 'M', 'שׂ'),
- (0xFB2C, 'M', 'שּׁ'),
- (0xFB2D, 'M', 'שּׂ'),
- (0xFB2E, 'M', 'אַ'),
- (0xFB2F, 'M', 'אָ'),
- (0xFB30, 'M', 'אּ'),
- (0xFB31, 'M', 'בּ'),
- (0xFB32, 'M', 'גּ'),
- (0xFB33, 'M', 'דּ'),
- (0xFB34, 'M', 'הּ'),
- (0xFB35, 'M', 'וּ'),
- (0xFB36, 'M', 'זּ'),
+ (0xFB1F, 'M', u'ײַ'),
+ (0xFB20, 'M', u'ע'),
+ (0xFB21, 'M', u'א'),
+ (0xFB22, 'M', u'ד'),
+ (0xFB23, 'M', u'ה'),
+ (0xFB24, 'M', u'כ'),
+ (0xFB25, 'M', u'ל'),
+ (0xFB26, 'M', u'ם'),
+ (0xFB27, 'M', u'ר'),
+ (0xFB28, 'M', u'ת'),
+ (0xFB29, '3', u'+'),
+ (0xFB2A, 'M', u'שׁ'),
+ (0xFB2B, 'M', u'שׂ'),
+ (0xFB2C, 'M', u'שּׁ'),
+ (0xFB2D, 'M', u'שּׂ'),
+ (0xFB2E, 'M', u'אַ'),
+ (0xFB2F, 'M', u'אָ'),
+ (0xFB30, 'M', u'אּ'),
+ (0xFB31, 'M', u'בּ'),
+ (0xFB32, 'M', u'גּ'),
+ (0xFB33, 'M', u'דּ'),
+ (0xFB34, 'M', u'הּ'),
+ (0xFB35, 'M', u'וּ'),
+ (0xFB36, 'M', u'זּ'),
(0xFB37, 'X'),
- (0xFB38, 'M', 'טּ'),
- (0xFB39, 'M', 'יּ'),
- (0xFB3A, 'M', 'ךּ'),
- (0xFB3B, 'M', 'כּ'),
- (0xFB3C, 'M', 'לּ'),
+ (0xFB38, 'M', u'טּ'),
+ (0xFB39, 'M', u'יּ'),
+ (0xFB3A, 'M', u'ךּ'),
+ (0xFB3B, 'M', u'כּ'),
+ (0xFB3C, 'M', u'לּ'),
(0xFB3D, 'X'),
- (0xFB3E, 'M', 'מּ'),
+ (0xFB3E, 'M', u'מּ'),
(0xFB3F, 'X'),
- (0xFB40, 'M', 'נּ'),
- (0xFB41, 'M', 'סּ'),
+ (0xFB40, 'M', u'נּ'),
+ (0xFB41, 'M', u'סּ'),
(0xFB42, 'X'),
- (0xFB43, 'M', 'ףּ'),
- (0xFB44, 'M', 'פּ'),
+ (0xFB43, 'M', u'ףּ'),
+ (0xFB44, 'M', u'פּ'),
(0xFB45, 'X'),
- (0xFB46, 'M', 'צּ'),
- (0xFB47, 'M', 'קּ'),
- (0xFB48, 'M', 'רּ'),
- (0xFB49, 'M', 'שּ'),
- (0xFB4A, 'M', 'תּ'),
- (0xFB4B, 'M', 'וֹ'),
- (0xFB4C, 'M', 'בֿ'),
- (0xFB4D, 'M', 'כֿ'),
- (0xFB4E, 'M', 'פֿ'),
- (0xFB4F, 'M', 'אל'),
- (0xFB50, 'M', 'ٱ'),
- (0xFB52, 'M', 'ٻ'),
- (0xFB56, 'M', 'پ'),
- (0xFB5A, 'M', 'ڀ'),
- (0xFB5E, 'M', 'ٺ'),
- (0xFB62, 'M', 'ٿ'),
- (0xFB66, 'M', 'ٹ'),
- (0xFB6A, 'M', 'ڤ'),
- (0xFB6E, 'M', 'ڦ'),
- (0xFB72, 'M', 'ڄ'),
- (0xFB76, 'M', 'ڃ'),
- (0xFB7A, 'M', 'چ'),
- (0xFB7E, 'M', 'ڇ'),
- (0xFB82, 'M', 'ڍ'),
- (0xFB84, 'M', 'ڌ'),
- (0xFB86, 'M', 'ڎ'),
- (0xFB88, 'M', 'ڈ'),
- (0xFB8A, 'M', 'ژ'),
- (0xFB8C, 'M', 'ڑ'),
- (0xFB8E, 'M', 'ک'),
- (0xFB92, 'M', 'گ'),
- (0xFB96, 'M', 'ڳ'),
- (0xFB9A, 'M', 'ڱ'),
- (0xFB9E, 'M', 'ں'),
- (0xFBA0, 'M', 'ڻ'),
- (0xFBA4, 'M', 'ۀ'),
- (0xFBA6, 'M', 'ہ'),
- (0xFBAA, 'M', 'ھ'),
- (0xFBAE, 'M', 'ے'),
- (0xFBB0, 'M', 'ۓ'),
- (0xFBB2, 'V'),
- (0xFBC3, 'X'),
- (0xFBD3, 'M', 'ڭ'),
- (0xFBD7, 'M', 'ۇ'),
- (0xFBD9, 'M', 'ۆ'),
- (0xFBDB, 'M', 'ۈ'),
- (0xFBDD, 'M', 'ۇٴ'),
- (0xFBDE, 'M', 'ۋ'),
]
-def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_44():
return [
- (0xFBE0, 'M', 'ۅ'),
- (0xFBE2, 'M', 'ۉ'),
- (0xFBE4, 'M', 'ې'),
- (0xFBE8, 'M', 'ى'),
- (0xFBEA, 'M', 'ئا'),
- (0xFBEC, 'M', 'ئە'),
- (0xFBEE, 'M', 'ئو'),
- (0xFBF0, 'M', 'ئۇ'),
- (0xFBF2, 'M', 'ئۆ'),
- (0xFBF4, 'M', 'ئۈ'),
- (0xFBF6, 'M', 'ئې'),
- (0xFBF9, 'M', 'ئى'),
- (0xFBFC, 'M', 'ی'),
- (0xFC00, 'M', 'ئج'),
- (0xFC01, 'M', 'ئح'),
- (0xFC02, 'M', 'ئم'),
- (0xFC03, 'M', 'ئى'),
- (0xFC04, 'M', 'ئي'),
- (0xFC05, 'M', 'بج'),
- (0xFC06, 'M', 'بح'),
- (0xFC07, 'M', 'بخ'),
- (0xFC08, 'M', 'بم'),
- (0xFC09, 'M', 'بى'),
- (0xFC0A, 'M', 'بي'),
- (0xFC0B, 'M', 'تج'),
- (0xFC0C, 'M', 'تح'),
- (0xFC0D, 'M', 'تخ'),
- (0xFC0E, 'M', 'تم'),
- (0xFC0F, 'M', 'تى'),
- (0xFC10, 'M', 'تي'),
- (0xFC11, 'M', 'ثج'),
- (0xFC12, 'M', 'ثم'),
- (0xFC13, 'M', 'ثى'),
- (0xFC14, 'M', 'ثي'),
- (0xFC15, 'M', 'جح'),
- (0xFC16, 'M', 'جم'),
- (0xFC17, 'M', 'حج'),
- (0xFC18, 'M', 'حم'),
- (0xFC19, 'M', 'خج'),
- (0xFC1A, 'M', 'خح'),
- (0xFC1B, 'M', 'خم'),
- (0xFC1C, 'M', 'سج'),
- (0xFC1D, 'M', 'سح'),
- (0xFC1E, 'M', 'سخ'),
- (0xFC1F, 'M', 'سم'),
- (0xFC20, 'M', 'صح'),
- (0xFC21, 'M', 'صم'),
- (0xFC22, 'M', 'ضج'),
- (0xFC23, 'M', 'ضح'),
- (0xFC24, 'M', 'ضخ'),
- (0xFC25, 'M', 'ضم'),
- (0xFC26, 'M', 'طح'),
- (0xFC27, 'M', 'طم'),
- (0xFC28, 'M', 'ظم'),
- (0xFC29, 'M', 'عج'),
- (0xFC2A, 'M', 'عم'),
- (0xFC2B, 'M', 'غج'),
- (0xFC2C, 'M', 'غم'),
- (0xFC2D, 'M', 'فج'),
- (0xFC2E, 'M', 'فح'),
- (0xFC2F, 'M', 'فخ'),
- (0xFC30, 'M', 'فم'),
- (0xFC31, 'M', 'فى'),
- (0xFC32, 'M', 'في'),
- (0xFC33, 'M', 'قح'),
- (0xFC34, 'M', 'قم'),
- (0xFC35, 'M', 'قى'),
- (0xFC36, 'M', 'قي'),
- (0xFC37, 'M', 'كا'),
- (0xFC38, 'M', 'كج'),
- (0xFC39, 'M', 'كح'),
- (0xFC3A, 'M', 'كخ'),
- (0xFC3B, 'M', 'كل'),
- (0xFC3C, 'M', 'كم'),
- (0xFC3D, 'M', 'كى'),
- (0xFC3E, 'M', 'كي'),
- (0xFC3F, 'M', 'لج'),
- (0xFC40, 'M', 'لح'),
- (0xFC41, 'M', 'لخ'),
- (0xFC42, 'M', 'لم'),
- (0xFC43, 'M', 'لى'),
- (0xFC44, 'M', 'لي'),
- (0xFC45, 'M', 'مج'),
- (0xFC46, 'M', 'مح'),
- (0xFC47, 'M', 'مخ'),
- (0xFC48, 'M', 'مم'),
- (0xFC49, 'M', 'مى'),
- (0xFC4A, 'M', 'مي'),
- (0xFC4B, 'M', 'نج'),
- (0xFC4C, 'M', 'نح'),
- (0xFC4D, 'M', 'نخ'),
- (0xFC4E, 'M', 'نم'),
- (0xFC4F, 'M', 'نى'),
- (0xFC50, 'M', 'ني'),
- (0xFC51, 'M', 'هج'),
- (0xFC52, 'M', 'هم'),
- (0xFC53, 'M', 'هى'),
- (0xFC54, 'M', 'هي'),
- (0xFC55, 'M', 'يج'),
- (0xFC56, 'M', 'يح'),
+ (0xFB46, 'M', u'צּ'),
+ (0xFB47, 'M', u'קּ'),
+ (0xFB48, 'M', u'רּ'),
+ (0xFB49, 'M', u'שּ'),
+ (0xFB4A, 'M', u'תּ'),
+ (0xFB4B, 'M', u'וֹ'),
+ (0xFB4C, 'M', u'בֿ'),
+ (0xFB4D, 'M', u'כֿ'),
+ (0xFB4E, 'M', u'פֿ'),
+ (0xFB4F, 'M', u'אל'),
+ (0xFB50, 'M', u'ٱ'),
+ (0xFB52, 'M', u'ٻ'),
+ (0xFB56, 'M', u'پ'),
+ (0xFB5A, 'M', u'ڀ'),
+ (0xFB5E, 'M', u'ٺ'),
+ (0xFB62, 'M', u'ٿ'),
+ (0xFB66, 'M', u'ٹ'),
+ (0xFB6A, 'M', u'ڤ'),
+ (0xFB6E, 'M', u'ڦ'),
+ (0xFB72, 'M', u'ڄ'),
+ (0xFB76, 'M', u'ڃ'),
+ (0xFB7A, 'M', u'چ'),
+ (0xFB7E, 'M', u'ڇ'),
+ (0xFB82, 'M', u'ڍ'),
+ (0xFB84, 'M', u'ڌ'),
+ (0xFB86, 'M', u'ڎ'),
+ (0xFB88, 'M', u'ڈ'),
+ (0xFB8A, 'M', u'ژ'),
+ (0xFB8C, 'M', u'ڑ'),
+ (0xFB8E, 'M', u'ک'),
+ (0xFB92, 'M', u'گ'),
+ (0xFB96, 'M', u'ڳ'),
+ (0xFB9A, 'M', u'ڱ'),
+ (0xFB9E, 'M', u'ں'),
+ (0xFBA0, 'M', u'ڻ'),
+ (0xFBA4, 'M', u'ۀ'),
+ (0xFBA6, 'M', u'ہ'),
+ (0xFBAA, 'M', u'ھ'),
+ (0xFBAE, 'M', u'ے'),
+ (0xFBB0, 'M', u'ۓ'),
+ (0xFBB2, 'V'),
+ (0xFBC2, 'X'),
+ (0xFBD3, 'M', u'ڭ'),
+ (0xFBD7, 'M', u'ۇ'),
+ (0xFBD9, 'M', u'ۆ'),
+ (0xFBDB, 'M', u'ۈ'),
+ (0xFBDD, 'M', u'ۇٴ'),
+ (0xFBDE, 'M', u'ۋ'),
+ (0xFBE0, 'M', u'ۅ'),
+ (0xFBE2, 'M', u'ۉ'),
+ (0xFBE4, 'M', u'ې'),
+ (0xFBE8, 'M', u'ى'),
+ (0xFBEA, 'M', u'ئا'),
+ (0xFBEC, 'M', u'ئە'),
+ (0xFBEE, 'M', u'ئو'),
+ (0xFBF0, 'M', u'ئۇ'),
+ (0xFBF2, 'M', u'ئۆ'),
+ (0xFBF4, 'M', u'ئۈ'),
+ (0xFBF6, 'M', u'ئې'),
+ (0xFBF9, 'M', u'ئى'),
+ (0xFBFC, 'M', u'ی'),
+ (0xFC00, 'M', u'ئج'),
+ (0xFC01, 'M', u'ئح'),
+ (0xFC02, 'M', u'ئم'),
+ (0xFC03, 'M', u'ئى'),
+ (0xFC04, 'M', u'ئي'),
+ (0xFC05, 'M', u'بج'),
+ (0xFC06, 'M', u'بح'),
+ (0xFC07, 'M', u'بخ'),
+ (0xFC08, 'M', u'بم'),
+ (0xFC09, 'M', u'بى'),
+ (0xFC0A, 'M', u'بي'),
+ (0xFC0B, 'M', u'تج'),
+ (0xFC0C, 'M', u'تح'),
+ (0xFC0D, 'M', u'تخ'),
+ (0xFC0E, 'M', u'تم'),
+ (0xFC0F, 'M', u'تى'),
+ (0xFC10, 'M', u'تي'),
+ (0xFC11, 'M', u'ثج'),
+ (0xFC12, 'M', u'ثم'),
+ (0xFC13, 'M', u'ثى'),
+ (0xFC14, 'M', u'ثي'),
+ (0xFC15, 'M', u'جح'),
+ (0xFC16, 'M', u'جم'),
+ (0xFC17, 'M', u'حج'),
+ (0xFC18, 'M', u'حم'),
+ (0xFC19, 'M', u'خج'),
+ (0xFC1A, 'M', u'خح'),
+ (0xFC1B, 'M', u'خم'),
+ (0xFC1C, 'M', u'سج'),
+ (0xFC1D, 'M', u'سح'),
+ (0xFC1E, 'M', u'سخ'),
+ (0xFC1F, 'M', u'سم'),
+ (0xFC20, 'M', u'صح'),
+ (0xFC21, 'M', u'صم'),
+ (0xFC22, 'M', u'ضج'),
+ (0xFC23, 'M', u'ضح'),
+ (0xFC24, 'M', u'ضخ'),
+ (0xFC25, 'M', u'ضم'),
+ (0xFC26, 'M', u'طح'),
]
-def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_45():
return [
- (0xFC57, 'M', 'يخ'),
- (0xFC58, 'M', 'يم'),
- (0xFC59, 'M', 'يى'),
- (0xFC5A, 'M', 'يي'),
- (0xFC5B, 'M', 'ذٰ'),
- (0xFC5C, 'M', 'رٰ'),
- (0xFC5D, 'M', 'ىٰ'),
- (0xFC5E, '3', ' ٌّ'),
- (0xFC5F, '3', ' ٍّ'),
- (0xFC60, '3', ' َّ'),
- (0xFC61, '3', ' ُّ'),
- (0xFC62, '3', ' ِّ'),
- (0xFC63, '3', ' ّٰ'),
- (0xFC64, 'M', 'ئر'),
- (0xFC65, 'M', 'ئز'),
- (0xFC66, 'M', 'ئم'),
- (0xFC67, 'M', 'ئن'),
- (0xFC68, 'M', 'ئى'),
- (0xFC69, 'M', 'ئي'),
- (0xFC6A, 'M', 'بر'),
- (0xFC6B, 'M', 'بز'),
- (0xFC6C, 'M', 'بم'),
- (0xFC6D, 'M', 'بن'),
- (0xFC6E, 'M', 'بى'),
- (0xFC6F, 'M', 'بي'),
- (0xFC70, 'M', 'تر'),
- (0xFC71, 'M', 'تز'),
- (0xFC72, 'M', 'تم'),
- (0xFC73, 'M', 'تن'),
- (0xFC74, 'M', 'تى'),
- (0xFC75, 'M', 'تي'),
- (0xFC76, 'M', 'ثر'),
- (0xFC77, 'M', 'ثز'),
- (0xFC78, 'M', 'ثم'),
- (0xFC79, 'M', 'ثن'),
- (0xFC7A, 'M', 'ثى'),
- (0xFC7B, 'M', 'ثي'),
- (0xFC7C, 'M', 'فى'),
- (0xFC7D, 'M', 'في'),
- (0xFC7E, 'M', 'قى'),
- (0xFC7F, 'M', 'قي'),
- (0xFC80, 'M', 'كا'),
- (0xFC81, 'M', 'كل'),
- (0xFC82, 'M', 'كم'),
- (0xFC83, 'M', 'كى'),
- (0xFC84, 'M', 'كي'),
- (0xFC85, 'M', 'لم'),
- (0xFC86, 'M', 'لى'),
- (0xFC87, 'M', 'لي'),
- (0xFC88, 'M', 'ما'),
- (0xFC89, 'M', 'مم'),
- (0xFC8A, 'M', 'نر'),
- (0xFC8B, 'M', 'نز'),
- (0xFC8C, 'M', 'نم'),
- (0xFC8D, 'M', 'نن'),
- (0xFC8E, 'M', 'نى'),
- (0xFC8F, 'M', 'ني'),
- (0xFC90, 'M', 'ىٰ'),
- (0xFC91, 'M', 'ير'),
- (0xFC92, 'M', 'يز'),
- (0xFC93, 'M', 'يم'),
- (0xFC94, 'M', 'ين'),
- (0xFC95, 'M', 'يى'),
- (0xFC96, 'M', 'يي'),
- (0xFC97, 'M', 'ئج'),
- (0xFC98, 'M', 'ئح'),
- (0xFC99, 'M', 'ئخ'),
- (0xFC9A, 'M', 'ئم'),
- (0xFC9B, 'M', 'ئه'),
- (0xFC9C, 'M', 'بج'),
- (0xFC9D, 'M', 'بح'),
- (0xFC9E, 'M', 'بخ'),
- (0xFC9F, 'M', 'بم'),
- (0xFCA0, 'M', 'به'),
- (0xFCA1, 'M', 'تج'),
- (0xFCA2, 'M', 'تح'),
- (0xFCA3, 'M', 'تخ'),
- (0xFCA4, 'M', 'تم'),
- (0xFCA5, 'M', 'ته'),
- (0xFCA6, 'M', 'ثم'),
- (0xFCA7, 'M', 'جح'),
- (0xFCA8, 'M', 'جم'),
- (0xFCA9, 'M', 'حج'),
- (0xFCAA, 'M', 'حم'),
- (0xFCAB, 'M', 'خج'),
- (0xFCAC, 'M', 'خم'),
- (0xFCAD, 'M', 'سج'),
- (0xFCAE, 'M', 'سح'),
- (0xFCAF, 'M', 'سخ'),
- (0xFCB0, 'M', 'سم'),
- (0xFCB1, 'M', 'صح'),
- (0xFCB2, 'M', 'صخ'),
- (0xFCB3, 'M', 'صم'),
- (0xFCB4, 'M', 'ضج'),
- (0xFCB5, 'M', 'ضح'),
- (0xFCB6, 'M', 'ضخ'),
- (0xFCB7, 'M', 'ضم'),
- (0xFCB8, 'M', 'طح'),
- (0xFCB9, 'M', 'ظم'),
- (0xFCBA, 'M', 'عج'),
+ (0xFC27, 'M', u'طم'),
+ (0xFC28, 'M', u'ظم'),
+ (0xFC29, 'M', u'عج'),
+ (0xFC2A, 'M', u'عم'),
+ (0xFC2B, 'M', u'غج'),
+ (0xFC2C, 'M', u'غم'),
+ (0xFC2D, 'M', u'فج'),
+ (0xFC2E, 'M', u'فح'),
+ (0xFC2F, 'M', u'فخ'),
+ (0xFC30, 'M', u'فم'),
+ (0xFC31, 'M', u'فى'),
+ (0xFC32, 'M', u'في'),
+ (0xFC33, 'M', u'قح'),
+ (0xFC34, 'M', u'قم'),
+ (0xFC35, 'M', u'قى'),
+ (0xFC36, 'M', u'قي'),
+ (0xFC37, 'M', u'كا'),
+ (0xFC38, 'M', u'كج'),
+ (0xFC39, 'M', u'كح'),
+ (0xFC3A, 'M', u'كخ'),
+ (0xFC3B, 'M', u'كل'),
+ (0xFC3C, 'M', u'كم'),
+ (0xFC3D, 'M', u'كى'),
+ (0xFC3E, 'M', u'كي'),
+ (0xFC3F, 'M', u'لج'),
+ (0xFC40, 'M', u'لح'),
+ (0xFC41, 'M', u'لخ'),
+ (0xFC42, 'M', u'لم'),
+ (0xFC43, 'M', u'لى'),
+ (0xFC44, 'M', u'لي'),
+ (0xFC45, 'M', u'مج'),
+ (0xFC46, 'M', u'مح'),
+ (0xFC47, 'M', u'مخ'),
+ (0xFC48, 'M', u'مم'),
+ (0xFC49, 'M', u'مى'),
+ (0xFC4A, 'M', u'مي'),
+ (0xFC4B, 'M', u'نج'),
+ (0xFC4C, 'M', u'نح'),
+ (0xFC4D, 'M', u'نخ'),
+ (0xFC4E, 'M', u'نم'),
+ (0xFC4F, 'M', u'نى'),
+ (0xFC50, 'M', u'ني'),
+ (0xFC51, 'M', u'هج'),
+ (0xFC52, 'M', u'هم'),
+ (0xFC53, 'M', u'هى'),
+ (0xFC54, 'M', u'هي'),
+ (0xFC55, 'M', u'يج'),
+ (0xFC56, 'M', u'يح'),
+ (0xFC57, 'M', u'يخ'),
+ (0xFC58, 'M', u'يم'),
+ (0xFC59, 'M', u'يى'),
+ (0xFC5A, 'M', u'يي'),
+ (0xFC5B, 'M', u'ذٰ'),
+ (0xFC5C, 'M', u'رٰ'),
+ (0xFC5D, 'M', u'ىٰ'),
+ (0xFC5E, '3', u' ٌّ'),
+ (0xFC5F, '3', u' ٍّ'),
+ (0xFC60, '3', u' َّ'),
+ (0xFC61, '3', u' ُّ'),
+ (0xFC62, '3', u' ِّ'),
+ (0xFC63, '3', u' ّٰ'),
+ (0xFC64, 'M', u'ئر'),
+ (0xFC65, 'M', u'ئز'),
+ (0xFC66, 'M', u'ئم'),
+ (0xFC67, 'M', u'ئن'),
+ (0xFC68, 'M', u'ئى'),
+ (0xFC69, 'M', u'ئي'),
+ (0xFC6A, 'M', u'بر'),
+ (0xFC6B, 'M', u'بز'),
+ (0xFC6C, 'M', u'بم'),
+ (0xFC6D, 'M', u'بن'),
+ (0xFC6E, 'M', u'بى'),
+ (0xFC6F, 'M', u'بي'),
+ (0xFC70, 'M', u'تر'),
+ (0xFC71, 'M', u'تز'),
+ (0xFC72, 'M', u'تم'),
+ (0xFC73, 'M', u'تن'),
+ (0xFC74, 'M', u'تى'),
+ (0xFC75, 'M', u'تي'),
+ (0xFC76, 'M', u'ثر'),
+ (0xFC77, 'M', u'ثز'),
+ (0xFC78, 'M', u'ثم'),
+ (0xFC79, 'M', u'ثن'),
+ (0xFC7A, 'M', u'ثى'),
+ (0xFC7B, 'M', u'ثي'),
+ (0xFC7C, 'M', u'فى'),
+ (0xFC7D, 'M', u'في'),
+ (0xFC7E, 'M', u'قى'),
+ (0xFC7F, 'M', u'قي'),
+ (0xFC80, 'M', u'كا'),
+ (0xFC81, 'M', u'كل'),
+ (0xFC82, 'M', u'كم'),
+ (0xFC83, 'M', u'كى'),
+ (0xFC84, 'M', u'كي'),
+ (0xFC85, 'M', u'لم'),
+ (0xFC86, 'M', u'لى'),
+ (0xFC87, 'M', u'لي'),
+ (0xFC88, 'M', u'ما'),
+ (0xFC89, 'M', u'مم'),
+ (0xFC8A, 'M', u'نر'),
]
-def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_46():
return [
- (0xFCBB, 'M', 'عم'),
- (0xFCBC, 'M', 'غج'),
- (0xFCBD, 'M', 'غم'),
- (0xFCBE, 'M', 'فج'),
- (0xFCBF, 'M', 'فح'),
- (0xFCC0, 'M', 'فخ'),
- (0xFCC1, 'M', 'فم'),
- (0xFCC2, 'M', 'قح'),
- (0xFCC3, 'M', 'قم'),
- (0xFCC4, 'M', 'كج'),
- (0xFCC5, 'M', 'كح'),
- (0xFCC6, 'M', 'كخ'),
- (0xFCC7, 'M', 'كل'),
- (0xFCC8, 'M', 'كم'),
- (0xFCC9, 'M', 'لج'),
- (0xFCCA, 'M', 'لح'),
- (0xFCCB, 'M', 'لخ'),
- (0xFCCC, 'M', 'لم'),
- (0xFCCD, 'M', 'له'),
- (0xFCCE, 'M', 'مج'),
- (0xFCCF, 'M', 'مح'),
- (0xFCD0, 'M', 'مخ'),
- (0xFCD1, 'M', 'مم'),
- (0xFCD2, 'M', 'نج'),
- (0xFCD3, 'M', 'نح'),
- (0xFCD4, 'M', 'نخ'),
- (0xFCD5, 'M', 'نم'),
- (0xFCD6, 'M', 'نه'),
- (0xFCD7, 'M', 'هج'),
- (0xFCD8, 'M', 'هم'),
- (0xFCD9, 'M', 'هٰ'),
- (0xFCDA, 'M', 'يج'),
- (0xFCDB, 'M', 'يح'),
- (0xFCDC, 'M', 'يخ'),
- (0xFCDD, 'M', 'يم'),
- (0xFCDE, 'M', 'يه'),
- (0xFCDF, 'M', 'ئم'),
- (0xFCE0, 'M', 'ئه'),
- (0xFCE1, 'M', 'بم'),
- (0xFCE2, 'M', 'به'),
- (0xFCE3, 'M', 'تم'),
- (0xFCE4, 'M', 'ته'),
- (0xFCE5, 'M', 'ثم'),
- (0xFCE6, 'M', 'ثه'),
- (0xFCE7, 'M', 'سم'),
- (0xFCE8, 'M', 'سه'),
- (0xFCE9, 'M', 'شم'),
- (0xFCEA, 'M', 'شه'),
- (0xFCEB, 'M', 'كل'),
- (0xFCEC, 'M', 'كم'),
- (0xFCED, 'M', 'لم'),
- (0xFCEE, 'M', 'نم'),
- (0xFCEF, 'M', 'نه'),
- (0xFCF0, 'M', 'يم'),
- (0xFCF1, 'M', 'يه'),
- (0xFCF2, 'M', 'ـَّ'),
- (0xFCF3, 'M', 'ـُّ'),
- (0xFCF4, 'M', 'ـِّ'),
- (0xFCF5, 'M', 'طى'),
- (0xFCF6, 'M', 'طي'),
- (0xFCF7, 'M', 'عى'),
- (0xFCF8, 'M', 'عي'),
- (0xFCF9, 'M', 'غى'),
- (0xFCFA, 'M', 'غي'),
- (0xFCFB, 'M', 'سى'),
- (0xFCFC, 'M', 'سي'),
- (0xFCFD, 'M', 'شى'),
- (0xFCFE, 'M', 'شي'),
- (0xFCFF, 'M', 'حى'),
- (0xFD00, 'M', 'حي'),
- (0xFD01, 'M', 'جى'),
- (0xFD02, 'M', 'جي'),
- (0xFD03, 'M', 'خى'),
- (0xFD04, 'M', 'خي'),
- (0xFD05, 'M', 'صى'),
- (0xFD06, 'M', 'صي'),
- (0xFD07, 'M', 'ضى'),
- (0xFD08, 'M', 'ضي'),
- (0xFD09, 'M', 'شج'),
- (0xFD0A, 'M', 'شح'),
- (0xFD0B, 'M', 'شخ'),
- (0xFD0C, 'M', 'شم'),
- (0xFD0D, 'M', 'شر'),
- (0xFD0E, 'M', 'سر'),
- (0xFD0F, 'M', 'صر'),
- (0xFD10, 'M', 'ضر'),
- (0xFD11, 'M', 'طى'),
- (0xFD12, 'M', 'طي'),
- (0xFD13, 'M', 'عى'),
- (0xFD14, 'M', 'عي'),
- (0xFD15, 'M', 'غى'),
- (0xFD16, 'M', 'غي'),
- (0xFD17, 'M', 'سى'),
- (0xFD18, 'M', 'سي'),
- (0xFD19, 'M', 'شى'),
- (0xFD1A, 'M', 'شي'),
- (0xFD1B, 'M', 'حى'),
- (0xFD1C, 'M', 'حي'),
- (0xFD1D, 'M', 'جى'),
- (0xFD1E, 'M', 'جي'),
+ (0xFC8B, 'M', u'نز'),
+ (0xFC8C, 'M', u'نم'),
+ (0xFC8D, 'M', u'نن'),
+ (0xFC8E, 'M', u'نى'),
+ (0xFC8F, 'M', u'ني'),
+ (0xFC90, 'M', u'ىٰ'),
+ (0xFC91, 'M', u'ير'),
+ (0xFC92, 'M', u'يز'),
+ (0xFC93, 'M', u'يم'),
+ (0xFC94, 'M', u'ين'),
+ (0xFC95, 'M', u'يى'),
+ (0xFC96, 'M', u'يي'),
+ (0xFC97, 'M', u'ئج'),
+ (0xFC98, 'M', u'ئح'),
+ (0xFC99, 'M', u'ئخ'),
+ (0xFC9A, 'M', u'ئم'),
+ (0xFC9B, 'M', u'ئه'),
+ (0xFC9C, 'M', u'بج'),
+ (0xFC9D, 'M', u'بح'),
+ (0xFC9E, 'M', u'بخ'),
+ (0xFC9F, 'M', u'بم'),
+ (0xFCA0, 'M', u'به'),
+ (0xFCA1, 'M', u'تج'),
+ (0xFCA2, 'M', u'تح'),
+ (0xFCA3, 'M', u'تخ'),
+ (0xFCA4, 'M', u'تم'),
+ (0xFCA5, 'M', u'ته'),
+ (0xFCA6, 'M', u'ثم'),
+ (0xFCA7, 'M', u'جح'),
+ (0xFCA8, 'M', u'جم'),
+ (0xFCA9, 'M', u'حج'),
+ (0xFCAA, 'M', u'حم'),
+ (0xFCAB, 'M', u'خج'),
+ (0xFCAC, 'M', u'خم'),
+ (0xFCAD, 'M', u'سج'),
+ (0xFCAE, 'M', u'سح'),
+ (0xFCAF, 'M', u'سخ'),
+ (0xFCB0, 'M', u'سم'),
+ (0xFCB1, 'M', u'صح'),
+ (0xFCB2, 'M', u'صخ'),
+ (0xFCB3, 'M', u'صم'),
+ (0xFCB4, 'M', u'ضج'),
+ (0xFCB5, 'M', u'ضح'),
+ (0xFCB6, 'M', u'ضخ'),
+ (0xFCB7, 'M', u'ضم'),
+ (0xFCB8, 'M', u'طح'),
+ (0xFCB9, 'M', u'ظم'),
+ (0xFCBA, 'M', u'عج'),
+ (0xFCBB, 'M', u'عم'),
+ (0xFCBC, 'M', u'غج'),
+ (0xFCBD, 'M', u'غم'),
+ (0xFCBE, 'M', u'فج'),
+ (0xFCBF, 'M', u'فح'),
+ (0xFCC0, 'M', u'فخ'),
+ (0xFCC1, 'M', u'فم'),
+ (0xFCC2, 'M', u'قح'),
+ (0xFCC3, 'M', u'قم'),
+ (0xFCC4, 'M', u'كج'),
+ (0xFCC5, 'M', u'كح'),
+ (0xFCC6, 'M', u'كخ'),
+ (0xFCC7, 'M', u'كل'),
+ (0xFCC8, 'M', u'كم'),
+ (0xFCC9, 'M', u'لج'),
+ (0xFCCA, 'M', u'لح'),
+ (0xFCCB, 'M', u'لخ'),
+ (0xFCCC, 'M', u'لم'),
+ (0xFCCD, 'M', u'له'),
+ (0xFCCE, 'M', u'مج'),
+ (0xFCCF, 'M', u'مح'),
+ (0xFCD0, 'M', u'مخ'),
+ (0xFCD1, 'M', u'مم'),
+ (0xFCD2, 'M', u'نج'),
+ (0xFCD3, 'M', u'نح'),
+ (0xFCD4, 'M', u'نخ'),
+ (0xFCD5, 'M', u'نم'),
+ (0xFCD6, 'M', u'نه'),
+ (0xFCD7, 'M', u'هج'),
+ (0xFCD8, 'M', u'هم'),
+ (0xFCD9, 'M', u'هٰ'),
+ (0xFCDA, 'M', u'يج'),
+ (0xFCDB, 'M', u'يح'),
+ (0xFCDC, 'M', u'يخ'),
+ (0xFCDD, 'M', u'يم'),
+ (0xFCDE, 'M', u'يه'),
+ (0xFCDF, 'M', u'ئم'),
+ (0xFCE0, 'M', u'ئه'),
+ (0xFCE1, 'M', u'بم'),
+ (0xFCE2, 'M', u'به'),
+ (0xFCE3, 'M', u'تم'),
+ (0xFCE4, 'M', u'ته'),
+ (0xFCE5, 'M', u'ثم'),
+ (0xFCE6, 'M', u'ثه'),
+ (0xFCE7, 'M', u'سم'),
+ (0xFCE8, 'M', u'سه'),
+ (0xFCE9, 'M', u'شم'),
+ (0xFCEA, 'M', u'شه'),
+ (0xFCEB, 'M', u'كل'),
+ (0xFCEC, 'M', u'كم'),
+ (0xFCED, 'M', u'لم'),
+ (0xFCEE, 'M', u'نم'),
]
-def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_47():
return [
- (0xFD1F, 'M', 'خى'),
- (0xFD20, 'M', 'خي'),
- (0xFD21, 'M', 'صى'),
- (0xFD22, 'M', 'صي'),
- (0xFD23, 'M', 'ضى'),
- (0xFD24, 'M', 'ضي'),
- (0xFD25, 'M', 'شج'),
- (0xFD26, 'M', 'شح'),
- (0xFD27, 'M', 'شخ'),
- (0xFD28, 'M', 'شم'),
- (0xFD29, 'M', 'شر'),
- (0xFD2A, 'M', 'سر'),
- (0xFD2B, 'M', 'صر'),
- (0xFD2C, 'M', 'ضر'),
- (0xFD2D, 'M', 'شج'),
- (0xFD2E, 'M', 'شح'),
- (0xFD2F, 'M', 'شخ'),
- (0xFD30, 'M', 'شم'),
- (0xFD31, 'M', 'سه'),
- (0xFD32, 'M', 'شه'),
- (0xFD33, 'M', 'طم'),
- (0xFD34, 'M', 'سج'),
- (0xFD35, 'M', 'سح'),
- (0xFD36, 'M', 'سخ'),
- (0xFD37, 'M', 'شج'),
- (0xFD38, 'M', 'شح'),
- (0xFD39, 'M', 'شخ'),
- (0xFD3A, 'M', 'طم'),
- (0xFD3B, 'M', 'ظم'),
- (0xFD3C, 'M', 'اً'),
+ (0xFCEF, 'M', u'نه'),
+ (0xFCF0, 'M', u'يم'),
+ (0xFCF1, 'M', u'يه'),
+ (0xFCF2, 'M', u'ـَّ'),
+ (0xFCF3, 'M', u'ـُّ'),
+ (0xFCF4, 'M', u'ـِّ'),
+ (0xFCF5, 'M', u'طى'),
+ (0xFCF6, 'M', u'طي'),
+ (0xFCF7, 'M', u'عى'),
+ (0xFCF8, 'M', u'عي'),
+ (0xFCF9, 'M', u'غى'),
+ (0xFCFA, 'M', u'غي'),
+ (0xFCFB, 'M', u'سى'),
+ (0xFCFC, 'M', u'سي'),
+ (0xFCFD, 'M', u'شى'),
+ (0xFCFE, 'M', u'شي'),
+ (0xFCFF, 'M', u'حى'),
+ (0xFD00, 'M', u'حي'),
+ (0xFD01, 'M', u'جى'),
+ (0xFD02, 'M', u'جي'),
+ (0xFD03, 'M', u'خى'),
+ (0xFD04, 'M', u'خي'),
+ (0xFD05, 'M', u'صى'),
+ (0xFD06, 'M', u'صي'),
+ (0xFD07, 'M', u'ضى'),
+ (0xFD08, 'M', u'ضي'),
+ (0xFD09, 'M', u'شج'),
+ (0xFD0A, 'M', u'شح'),
+ (0xFD0B, 'M', u'شخ'),
+ (0xFD0C, 'M', u'شم'),
+ (0xFD0D, 'M', u'شر'),
+ (0xFD0E, 'M', u'سر'),
+ (0xFD0F, 'M', u'صر'),
+ (0xFD10, 'M', u'ضر'),
+ (0xFD11, 'M', u'طى'),
+ (0xFD12, 'M', u'طي'),
+ (0xFD13, 'M', u'عى'),
+ (0xFD14, 'M', u'عي'),
+ (0xFD15, 'M', u'غى'),
+ (0xFD16, 'M', u'غي'),
+ (0xFD17, 'M', u'سى'),
+ (0xFD18, 'M', u'سي'),
+ (0xFD19, 'M', u'شى'),
+ (0xFD1A, 'M', u'شي'),
+ (0xFD1B, 'M', u'حى'),
+ (0xFD1C, 'M', u'حي'),
+ (0xFD1D, 'M', u'جى'),
+ (0xFD1E, 'M', u'جي'),
+ (0xFD1F, 'M', u'خى'),
+ (0xFD20, 'M', u'خي'),
+ (0xFD21, 'M', u'صى'),
+ (0xFD22, 'M', u'صي'),
+ (0xFD23, 'M', u'ضى'),
+ (0xFD24, 'M', u'ضي'),
+ (0xFD25, 'M', u'شج'),
+ (0xFD26, 'M', u'شح'),
+ (0xFD27, 'M', u'شخ'),
+ (0xFD28, 'M', u'شم'),
+ (0xFD29, 'M', u'شر'),
+ (0xFD2A, 'M', u'سر'),
+ (0xFD2B, 'M', u'صر'),
+ (0xFD2C, 'M', u'ضر'),
+ (0xFD2D, 'M', u'شج'),
+ (0xFD2E, 'M', u'شح'),
+ (0xFD2F, 'M', u'شخ'),
+ (0xFD30, 'M', u'شم'),
+ (0xFD31, 'M', u'سه'),
+ (0xFD32, 'M', u'شه'),
+ (0xFD33, 'M', u'طم'),
+ (0xFD34, 'M', u'سج'),
+ (0xFD35, 'M', u'سح'),
+ (0xFD36, 'M', u'سخ'),
+ (0xFD37, 'M', u'شج'),
+ (0xFD38, 'M', u'شح'),
+ (0xFD39, 'M', u'شخ'),
+ (0xFD3A, 'M', u'طم'),
+ (0xFD3B, 'M', u'ظم'),
+ (0xFD3C, 'M', u'اً'),
(0xFD3E, 'V'),
- (0xFD50, 'M', 'تجم'),
- (0xFD51, 'M', 'تحج'),
- (0xFD53, 'M', 'تحم'),
- (0xFD54, 'M', 'تخم'),
- (0xFD55, 'M', 'تمج'),
- (0xFD56, 'M', 'تمح'),
- (0xFD57, 'M', 'تمخ'),
- (0xFD58, 'M', 'جمح'),
- (0xFD5A, 'M', 'حمي'),
- (0xFD5B, 'M', 'حمى'),
- (0xFD5C, 'M', 'سحج'),
- (0xFD5D, 'M', 'سجح'),
- (0xFD5E, 'M', 'سجى'),
- (0xFD5F, 'M', 'سمح'),
- (0xFD61, 'M', 'سمج'),
- (0xFD62, 'M', 'سمم'),
- (0xFD64, 'M', 'صحح'),
- (0xFD66, 'M', 'صمم'),
- (0xFD67, 'M', 'شحم'),
- (0xFD69, 'M', 'شجي'),
- (0xFD6A, 'M', 'شمخ'),
- (0xFD6C, 'M', 'شمم'),
- (0xFD6E, 'M', 'ضحى'),
- (0xFD6F, 'M', 'ضخم'),
- (0xFD71, 'M', 'طمح'),
- (0xFD73, 'M', 'طمم'),
- (0xFD74, 'M', 'طمي'),
- (0xFD75, 'M', 'عجم'),
- (0xFD76, 'M', 'عمم'),
- (0xFD78, 'M', 'عمى'),
- (0xFD79, 'M', 'غمم'),
- (0xFD7A, 'M', 'غمي'),
- (0xFD7B, 'M', 'غمى'),
- (0xFD7C, 'M', 'فخم'),
- (0xFD7E, 'M', 'قمح'),
- (0xFD7F, 'M', 'قمم'),
- (0xFD80, 'M', 'لحم'),
- (0xFD81, 'M', 'لحي'),
- (0xFD82, 'M', 'لحى'),
- (0xFD83, 'M', 'لجج'),
- (0xFD85, 'M', 'لخم'),
- (0xFD87, 'M', 'لمح'),
- (0xFD89, 'M', 'محج'),
- (0xFD8A, 'M', 'محم'),
- (0xFD8B, 'M', 'محي'),
- (0xFD8C, 'M', 'مجح'),
- (0xFD8D, 'M', 'مجم'),
- (0xFD8E, 'M', 'مخج'),
- (0xFD8F, 'M', 'مخم'),
- (0xFD90, 'X'),
- (0xFD92, 'M', 'مجخ'),
- (0xFD93, 'M', 'همج'),
- (0xFD94, 'M', 'همم'),
- (0xFD95, 'M', 'نحم'),
- (0xFD96, 'M', 'نحى'),
- (0xFD97, 'M', 'نجم'),
- (0xFD99, 'M', 'نجى'),
- (0xFD9A, 'M', 'نمي'),
- (0xFD9B, 'M', 'نمى'),
- (0xFD9C, 'M', 'يمم'),
- (0xFD9E, 'M', 'بخي'),
- (0xFD9F, 'M', 'تجي'),
- (0xFDA0, 'M', 'تجى'),
- (0xFDA1, 'M', 'تخي'),
- (0xFDA2, 'M', 'تخى'),
- (0xFDA3, 'M', 'تمي'),
- (0xFDA4, 'M', 'تمى'),
- (0xFDA5, 'M', 'جمي'),
- (0xFDA6, 'M', 'جحى'),
+ (0xFD40, 'X'),
+ (0xFD50, 'M', u'تجم'),
+ (0xFD51, 'M', u'تحج'),
+ (0xFD53, 'M', u'تحم'),
+ (0xFD54, 'M', u'تخم'),
+ (0xFD55, 'M', u'تمج'),
+ (0xFD56, 'M', u'تمح'),
+ (0xFD57, 'M', u'تمخ'),
+ (0xFD58, 'M', u'جمح'),
+ (0xFD5A, 'M', u'حمي'),
+ (0xFD5B, 'M', u'حمى'),
+ (0xFD5C, 'M', u'سحج'),
+ (0xFD5D, 'M', u'سجح'),
+ (0xFD5E, 'M', u'سجى'),
+ (0xFD5F, 'M', u'سمح'),
+ (0xFD61, 'M', u'سمج'),
+ (0xFD62, 'M', u'سمم'),
+ (0xFD64, 'M', u'صحح'),
+ (0xFD66, 'M', u'صمم'),
+ (0xFD67, 'M', u'شحم'),
+ (0xFD69, 'M', u'شجي'),
]
-def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_48():
return [
- (0xFDA7, 'M', 'جمى'),
- (0xFDA8, 'M', 'سخى'),
- (0xFDA9, 'M', 'صحي'),
- (0xFDAA, 'M', 'شحي'),
- (0xFDAB, 'M', 'ضحي'),
- (0xFDAC, 'M', 'لجي'),
- (0xFDAD, 'M', 'لمي'),
- (0xFDAE, 'M', 'يحي'),
- (0xFDAF, 'M', 'يجي'),
- (0xFDB0, 'M', 'يمي'),
- (0xFDB1, 'M', 'ممي'),
- (0xFDB2, 'M', 'قمي'),
- (0xFDB3, 'M', 'نحي'),
- (0xFDB4, 'M', 'قمح'),
- (0xFDB5, 'M', 'لحم'),
- (0xFDB6, 'M', 'عمي'),
- (0xFDB7, 'M', 'كمي'),
- (0xFDB8, 'M', 'نجح'),
- (0xFDB9, 'M', 'مخي'),
- (0xFDBA, 'M', 'لجم'),
- (0xFDBB, 'M', 'كمم'),
- (0xFDBC, 'M', 'لجم'),
- (0xFDBD, 'M', 'نجح'),
- (0xFDBE, 'M', 'جحي'),
- (0xFDBF, 'M', 'حجي'),
- (0xFDC0, 'M', 'مجي'),
- (0xFDC1, 'M', 'فمي'),
- (0xFDC2, 'M', 'بحي'),
- (0xFDC3, 'M', 'كمم'),
- (0xFDC4, 'M', 'عجم'),
- (0xFDC5, 'M', 'صمم'),
- (0xFDC6, 'M', 'سخي'),
- (0xFDC7, 'M', 'نجي'),
+ (0xFD6A, 'M', u'شمخ'),
+ (0xFD6C, 'M', u'شمم'),
+ (0xFD6E, 'M', u'ضحى'),
+ (0xFD6F, 'M', u'ضخم'),
+ (0xFD71, 'M', u'طمح'),
+ (0xFD73, 'M', u'طمم'),
+ (0xFD74, 'M', u'طمي'),
+ (0xFD75, 'M', u'عجم'),
+ (0xFD76, 'M', u'عمم'),
+ (0xFD78, 'M', u'عمى'),
+ (0xFD79, 'M', u'غمم'),
+ (0xFD7A, 'M', u'غمي'),
+ (0xFD7B, 'M', u'غمى'),
+ (0xFD7C, 'M', u'فخم'),
+ (0xFD7E, 'M', u'قمح'),
+ (0xFD7F, 'M', u'قمم'),
+ (0xFD80, 'M', u'لحم'),
+ (0xFD81, 'M', u'لحي'),
+ (0xFD82, 'M', u'لحى'),
+ (0xFD83, 'M', u'لجج'),
+ (0xFD85, 'M', u'لخم'),
+ (0xFD87, 'M', u'لمح'),
+ (0xFD89, 'M', u'محج'),
+ (0xFD8A, 'M', u'محم'),
+ (0xFD8B, 'M', u'محي'),
+ (0xFD8C, 'M', u'مجح'),
+ (0xFD8D, 'M', u'مجم'),
+ (0xFD8E, 'M', u'مخج'),
+ (0xFD8F, 'M', u'مخم'),
+ (0xFD90, 'X'),
+ (0xFD92, 'M', u'مجخ'),
+ (0xFD93, 'M', u'همج'),
+ (0xFD94, 'M', u'همم'),
+ (0xFD95, 'M', u'نحم'),
+ (0xFD96, 'M', u'نحى'),
+ (0xFD97, 'M', u'نجم'),
+ (0xFD99, 'M', u'نجى'),
+ (0xFD9A, 'M', u'نمي'),
+ (0xFD9B, 'M', u'نمى'),
+ (0xFD9C, 'M', u'يمم'),
+ (0xFD9E, 'M', u'بخي'),
+ (0xFD9F, 'M', u'تجي'),
+ (0xFDA0, 'M', u'تجى'),
+ (0xFDA1, 'M', u'تخي'),
+ (0xFDA2, 'M', u'تخى'),
+ (0xFDA3, 'M', u'تمي'),
+ (0xFDA4, 'M', u'تمى'),
+ (0xFDA5, 'M', u'جمي'),
+ (0xFDA6, 'M', u'جحى'),
+ (0xFDA7, 'M', u'جمى'),
+ (0xFDA8, 'M', u'سخى'),
+ (0xFDA9, 'M', u'صحي'),
+ (0xFDAA, 'M', u'شحي'),
+ (0xFDAB, 'M', u'ضحي'),
+ (0xFDAC, 'M', u'لجي'),
+ (0xFDAD, 'M', u'لمي'),
+ (0xFDAE, 'M', u'يحي'),
+ (0xFDAF, 'M', u'يجي'),
+ (0xFDB0, 'M', u'يمي'),
+ (0xFDB1, 'M', u'ممي'),
+ (0xFDB2, 'M', u'قمي'),
+ (0xFDB3, 'M', u'نحي'),
+ (0xFDB4, 'M', u'قمح'),
+ (0xFDB5, 'M', u'لحم'),
+ (0xFDB6, 'M', u'عمي'),
+ (0xFDB7, 'M', u'كمي'),
+ (0xFDB8, 'M', u'نجح'),
+ (0xFDB9, 'M', u'مخي'),
+ (0xFDBA, 'M', u'لجم'),
+ (0xFDBB, 'M', u'كمم'),
+ (0xFDBC, 'M', u'لجم'),
+ (0xFDBD, 'M', u'نجح'),
+ (0xFDBE, 'M', u'جحي'),
+ (0xFDBF, 'M', u'حجي'),
+ (0xFDC0, 'M', u'مجي'),
+ (0xFDC1, 'M', u'فمي'),
+ (0xFDC2, 'M', u'بحي'),
+ (0xFDC3, 'M', u'كمم'),
+ (0xFDC4, 'M', u'عجم'),
+ (0xFDC5, 'M', u'صمم'),
+ (0xFDC6, 'M', u'سخي'),
+ (0xFDC7, 'M', u'نجي'),
(0xFDC8, 'X'),
- (0xFDCF, 'V'),
- (0xFDD0, 'X'),
- (0xFDF0, 'M', 'صلے'),
- (0xFDF1, 'M', 'قلے'),
- (0xFDF2, 'M', 'الله'),
- (0xFDF3, 'M', 'اكبر'),
- (0xFDF4, 'M', 'محمد'),
- (0xFDF5, 'M', 'صلعم'),
- (0xFDF6, 'M', 'رسول'),
- (0xFDF7, 'M', 'عليه'),
- (0xFDF8, 'M', 'وسلم'),
- (0xFDF9, 'M', 'صلى'),
- (0xFDFA, '3', 'صلى الله عليه وسلم'),
- (0xFDFB, '3', 'جل جلاله'),
- (0xFDFC, 'M', 'ریال'),
+ (0xFDF0, 'M', u'صلے'),
+ (0xFDF1, 'M', u'قلے'),
+ (0xFDF2, 'M', u'الله'),
+ (0xFDF3, 'M', u'اكبر'),
+ (0xFDF4, 'M', u'محمد'),
+ (0xFDF5, 'M', u'صلعم'),
+ (0xFDF6, 'M', u'رسول'),
+ (0xFDF7, 'M', u'عليه'),
+ (0xFDF8, 'M', u'وسلم'),
+ (0xFDF9, 'M', u'صلى'),
+ (0xFDFA, '3', u'صلى الله عليه وسلم'),
+ (0xFDFB, '3', u'جل جلاله'),
+ (0xFDFC, 'M', u'ریال'),
(0xFDFD, 'V'),
+ (0xFDFE, 'X'),
(0xFE00, 'I'),
- (0xFE10, '3', ','),
- (0xFE11, 'M', '、'),
+ (0xFE10, '3', u','),
+ ]
+
+def _seg_49():
+ return [
+ (0xFE11, 'M', u'、'),
(0xFE12, 'X'),
- (0xFE13, '3', ':'),
- (0xFE14, '3', ';'),
- (0xFE15, '3', '!'),
- (0xFE16, '3', '?'),
- (0xFE17, 'M', '〖'),
- (0xFE18, 'M', '〗'),
+ (0xFE13, '3', u':'),
+ (0xFE14, '3', u';'),
+ (0xFE15, '3', u'!'),
+ (0xFE16, '3', u'?'),
+ (0xFE17, 'M', u'〖'),
+ (0xFE18, 'M', u'〗'),
(0xFE19, 'X'),
(0xFE20, 'V'),
(0xFE30, 'X'),
- (0xFE31, 'M', '—'),
- (0xFE32, 'M', '–'),
- (0xFE33, '3', '_'),
- (0xFE35, '3', '('),
- (0xFE36, '3', ')'),
- (0xFE37, '3', '{'),
- (0xFE38, '3', '}'),
- (0xFE39, 'M', '〔'),
- (0xFE3A, 'M', '〕'),
- (0xFE3B, 'M', '【'),
- (0xFE3C, 'M', '】'),
- (0xFE3D, 'M', '《'),
- (0xFE3E, 'M', '》'),
- (0xFE3F, 'M', '〈'),
- (0xFE40, 'M', '〉'),
- (0xFE41, 'M', '「'),
- (0xFE42, 'M', '」'),
- (0xFE43, 'M', '『'),
- (0xFE44, 'M', '』'),
+ (0xFE31, 'M', u'—'),
+ (0xFE32, 'M', u'–'),
+ (0xFE33, '3', u'_'),
+ (0xFE35, '3', u'('),
+ (0xFE36, '3', u')'),
+ (0xFE37, '3', u'{'),
+ (0xFE38, '3', u'}'),
+ (0xFE39, 'M', u'〔'),
+ (0xFE3A, 'M', u'〕'),
+ (0xFE3B, 'M', u'【'),
+ (0xFE3C, 'M', u'】'),
+ (0xFE3D, 'M', u'《'),
+ (0xFE3E, 'M', u'》'),
+ (0xFE3F, 'M', u'〈'),
+ (0xFE40, 'M', u'〉'),
+ (0xFE41, 'M', u'「'),
+ (0xFE42, 'M', u'」'),
+ (0xFE43, 'M', u'『'),
+ (0xFE44, 'M', u'』'),
(0xFE45, 'V'),
- (0xFE47, '3', '['),
- (0xFE48, '3', ']'),
- (0xFE49, '3', ' ̅'),
- (0xFE4D, '3', '_'),
- (0xFE50, '3', ','),
- (0xFE51, 'M', '、'),
+ (0xFE47, '3', u'['),
+ (0xFE48, '3', u']'),
+ (0xFE49, '3', u' ̅'),
+ (0xFE4D, '3', u'_'),
+ (0xFE50, '3', u','),
+ (0xFE51, 'M', u'、'),
(0xFE52, 'X'),
- (0xFE54, '3', ';'),
- (0xFE55, '3', ':'),
- (0xFE56, '3', '?'),
- (0xFE57, '3', '!'),
- (0xFE58, 'M', '—'),
- (0xFE59, '3', '('),
- (0xFE5A, '3', ')'),
- (0xFE5B, '3', '{'),
- (0xFE5C, '3', '}'),
- (0xFE5D, 'M', '〔'),
- ]
-
-def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0xFE5E, 'M', '〕'),
- (0xFE5F, '3', '#'),
- (0xFE60, '3', '&'),
- (0xFE61, '3', '*'),
- (0xFE62, '3', '+'),
- (0xFE63, 'M', '-'),
- (0xFE64, '3', '<'),
- (0xFE65, '3', '>'),
- (0xFE66, '3', '='),
+ (0xFE54, '3', u';'),
+ (0xFE55, '3', u':'),
+ (0xFE56, '3', u'?'),
+ (0xFE57, '3', u'!'),
+ (0xFE58, 'M', u'—'),
+ (0xFE59, '3', u'('),
+ (0xFE5A, '3', u')'),
+ (0xFE5B, '3', u'{'),
+ (0xFE5C, '3', u'}'),
+ (0xFE5D, 'M', u'〔'),
+ (0xFE5E, 'M', u'〕'),
+ (0xFE5F, '3', u'#'),
+ (0xFE60, '3', u'&'),
+ (0xFE61, '3', u'*'),
+ (0xFE62, '3', u'+'),
+ (0xFE63, 'M', u'-'),
+ (0xFE64, '3', u'<'),
+ (0xFE65, '3', u'>'),
+ (0xFE66, '3', u'='),
(0xFE67, 'X'),
- (0xFE68, '3', '\\'),
- (0xFE69, '3', '$'),
- (0xFE6A, '3', '%'),
- (0xFE6B, '3', '@'),
+ (0xFE68, '3', u'\\'),
+ (0xFE69, '3', u'$'),
+ (0xFE6A, '3', u'%'),
+ (0xFE6B, '3', u'@'),
(0xFE6C, 'X'),
- (0xFE70, '3', ' ً'),
- (0xFE71, 'M', 'ـً'),
- (0xFE72, '3', ' ٌ'),
+ (0xFE70, '3', u' ً'),
+ (0xFE71, 'M', u'ـً'),
+ (0xFE72, '3', u' ٌ'),
(0xFE73, 'V'),
- (0xFE74, '3', ' ٍ'),
+ (0xFE74, '3', u' ٍ'),
(0xFE75, 'X'),
- (0xFE76, '3', ' َ'),
- (0xFE77, 'M', 'ـَ'),
- (0xFE78, '3', ' ُ'),
- (0xFE79, 'M', 'ـُ'),
- (0xFE7A, '3', ' ِ'),
- (0xFE7B, 'M', 'ـِ'),
- (0xFE7C, '3', ' ّ'),
- (0xFE7D, 'M', 'ـّ'),
- (0xFE7E, '3', ' ْ'),
- (0xFE7F, 'M', 'ـْ'),
- (0xFE80, 'M', 'ء'),
- (0xFE81, 'M', 'آ'),
- (0xFE83, 'M', 'أ'),
- (0xFE85, 'M', 'ؤ'),
- (0xFE87, 'M', 'إ'),
- (0xFE89, 'M', 'ئ'),
- (0xFE8D, 'M', 'ا'),
- (0xFE8F, 'M', 'ب'),
- (0xFE93, 'M', 'ة'),
- (0xFE95, 'M', 'ت'),
- (0xFE99, 'M', 'ث'),
- (0xFE9D, 'M', 'ج'),
- (0xFEA1, 'M', 'ح'),
- (0xFEA5, 'M', 'خ'),
- (0xFEA9, 'M', 'د'),
- (0xFEAB, 'M', 'ذ'),
- (0xFEAD, 'M', 'ر'),
- (0xFEAF, 'M', 'ز'),
- (0xFEB1, 'M', 'س'),
- (0xFEB5, 'M', 'ش'),
- (0xFEB9, 'M', 'ص'),
- (0xFEBD, 'M', 'ض'),
- (0xFEC1, 'M', 'ط'),
- (0xFEC5, 'M', 'ظ'),
- (0xFEC9, 'M', 'ع'),
- (0xFECD, 'M', 'غ'),
- (0xFED1, 'M', 'ف'),
- (0xFED5, 'M', 'ق'),
- (0xFED9, 'M', 'ك'),
- (0xFEDD, 'M', 'ل'),
- (0xFEE1, 'M', 'م'),
- (0xFEE5, 'M', 'ن'),
- (0xFEE9, 'M', 'ه'),
- (0xFEED, 'M', 'و'),
- (0xFEEF, 'M', 'ى'),
- (0xFEF1, 'M', 'ي'),
- (0xFEF5, 'M', 'لآ'),
- (0xFEF7, 'M', 'لأ'),
- (0xFEF9, 'M', 'لإ'),
- (0xFEFB, 'M', 'لا'),
+ (0xFE76, '3', u' َ'),
+ (0xFE77, 'M', u'ـَ'),
+ (0xFE78, '3', u' ُ'),
+ (0xFE79, 'M', u'ـُ'),
+ (0xFE7A, '3', u' ِ'),
+ (0xFE7B, 'M', u'ـِ'),
+ (0xFE7C, '3', u' ّ'),
+ (0xFE7D, 'M', u'ـّ'),
+ (0xFE7E, '3', u' ْ'),
+ (0xFE7F, 'M', u'ـْ'),
+ (0xFE80, 'M', u'ء'),
+ (0xFE81, 'M', u'آ'),
+ (0xFE83, 'M', u'أ'),
+ (0xFE85, 'M', u'ؤ'),
+ (0xFE87, 'M', u'إ'),
+ (0xFE89, 'M', u'ئ'),
+ (0xFE8D, 'M', u'ا'),
+ (0xFE8F, 'M', u'ب'),
+ (0xFE93, 'M', u'ة'),
+ (0xFE95, 'M', u'ت'),
+ (0xFE99, 'M', u'ث'),
+ (0xFE9D, 'M', u'ج'),
+ (0xFEA1, 'M', u'ح'),
+ (0xFEA5, 'M', u'خ'),
+ (0xFEA9, 'M', u'د'),
+ (0xFEAB, 'M', u'ذ'),
+ (0xFEAD, 'M', u'ر'),
+ (0xFEAF, 'M', u'ز'),
+ (0xFEB1, 'M', u'س'),
+ (0xFEB5, 'M', u'ش'),
+ (0xFEB9, 'M', u'ص'),
+ ]
+
+def _seg_50():
+ return [
+ (0xFEBD, 'M', u'ض'),
+ (0xFEC1, 'M', u'ط'),
+ (0xFEC5, 'M', u'ظ'),
+ (0xFEC9, 'M', u'ع'),
+ (0xFECD, 'M', u'غ'),
+ (0xFED1, 'M', u'ف'),
+ (0xFED5, 'M', u'ق'),
+ (0xFED9, 'M', u'ك'),
+ (0xFEDD, 'M', u'ل'),
+ (0xFEE1, 'M', u'م'),
+ (0xFEE5, 'M', u'ن'),
+ (0xFEE9, 'M', u'ه'),
+ (0xFEED, 'M', u'و'),
+ (0xFEEF, 'M', u'ى'),
+ (0xFEF1, 'M', u'ي'),
+ (0xFEF5, 'M', u'لآ'),
+ (0xFEF7, 'M', u'لأ'),
+ (0xFEF9, 'M', u'لإ'),
+ (0xFEFB, 'M', u'لا'),
(0xFEFD, 'X'),
(0xFEFF, 'I'),
(0xFF00, 'X'),
- (0xFF01, '3', '!'),
- (0xFF02, '3', '"'),
- (0xFF03, '3', '#'),
- (0xFF04, '3', '$'),
- (0xFF05, '3', '%'),
- (0xFF06, '3', '&'),
- (0xFF07, '3', '\''),
- (0xFF08, '3', '('),
- (0xFF09, '3', ')'),
- (0xFF0A, '3', '*'),
- (0xFF0B, '3', '+'),
- (0xFF0C, '3', ','),
- (0xFF0D, 'M', '-'),
- (0xFF0E, 'M', '.'),
- (0xFF0F, '3', '/'),
- (0xFF10, 'M', '0'),
- (0xFF11, 'M', '1'),
- (0xFF12, 'M', '2'),
- (0xFF13, 'M', '3'),
- (0xFF14, 'M', '4'),
- (0xFF15, 'M', '5'),
- (0xFF16, 'M', '6'),
- (0xFF17, 'M', '7'),
- (0xFF18, 'M', '8'),
- (0xFF19, 'M', '9'),
- (0xFF1A, '3', ':'),
+ (0xFF01, '3', u'!'),
+ (0xFF02, '3', u'"'),
+ (0xFF03, '3', u'#'),
+ (0xFF04, '3', u'$'),
+ (0xFF05, '3', u'%'),
+ (0xFF06, '3', u'&'),
+ (0xFF07, '3', u'\''),
+ (0xFF08, '3', u'('),
+ (0xFF09, '3', u')'),
+ (0xFF0A, '3', u'*'),
+ (0xFF0B, '3', u'+'),
+ (0xFF0C, '3', u','),
+ (0xFF0D, 'M', u'-'),
+ (0xFF0E, 'M', u'.'),
+ (0xFF0F, '3', u'/'),
+ (0xFF10, 'M', u'0'),
+ (0xFF11, 'M', u'1'),
+ (0xFF12, 'M', u'2'),
+ (0xFF13, 'M', u'3'),
+ (0xFF14, 'M', u'4'),
+ (0xFF15, 'M', u'5'),
+ (0xFF16, 'M', u'6'),
+ (0xFF17, 'M', u'7'),
+ (0xFF18, 'M', u'8'),
+ (0xFF19, 'M', u'9'),
+ (0xFF1A, '3', u':'),
+ (0xFF1B, '3', u';'),
+ (0xFF1C, '3', u'<'),
+ (0xFF1D, '3', u'='),
+ (0xFF1E, '3', u'>'),
+ (0xFF1F, '3', u'?'),
+ (0xFF20, '3', u'@'),
+ (0xFF21, 'M', u'a'),
+ (0xFF22, 'M', u'b'),
+ (0xFF23, 'M', u'c'),
+ (0xFF24, 'M', u'd'),
+ (0xFF25, 'M', u'e'),
+ (0xFF26, 'M', u'f'),
+ (0xFF27, 'M', u'g'),
+ (0xFF28, 'M', u'h'),
+ (0xFF29, 'M', u'i'),
+ (0xFF2A, 'M', u'j'),
+ (0xFF2B, 'M', u'k'),
+ (0xFF2C, 'M', u'l'),
+ (0xFF2D, 'M', u'm'),
+ (0xFF2E, 'M', u'n'),
+ (0xFF2F, 'M', u'o'),
+ (0xFF30, 'M', u'p'),
+ (0xFF31, 'M', u'q'),
+ (0xFF32, 'M', u'r'),
+ (0xFF33, 'M', u's'),
+ (0xFF34, 'M', u't'),
+ (0xFF35, 'M', u'u'),
+ (0xFF36, 'M', u'v'),
+ (0xFF37, 'M', u'w'),
+ (0xFF38, 'M', u'x'),
+ (0xFF39, 'M', u'y'),
+ (0xFF3A, 'M', u'z'),
+ (0xFF3B, '3', u'['),
+ (0xFF3C, '3', u'\\'),
+ (0xFF3D, '3', u']'),
+ (0xFF3E, '3', u'^'),
+ (0xFF3F, '3', u'_'),
+ (0xFF40, '3', u'`'),
+ (0xFF41, 'M', u'a'),
+ (0xFF42, 'M', u'b'),
+ (0xFF43, 'M', u'c'),
+ (0xFF44, 'M', u'd'),
+ (0xFF45, 'M', u'e'),
+ (0xFF46, 'M', u'f'),
+ (0xFF47, 'M', u'g'),
+ (0xFF48, 'M', u'h'),
+ (0xFF49, 'M', u'i'),
+ (0xFF4A, 'M', u'j'),
+ (0xFF4B, 'M', u'k'),
+ (0xFF4C, 'M', u'l'),
+ (0xFF4D, 'M', u'm'),
+ (0xFF4E, 'M', u'n'),
]
-def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_51():
return [
- (0xFF1B, '3', ';'),
- (0xFF1C, '3', '<'),
- (0xFF1D, '3', '='),
- (0xFF1E, '3', '>'),
- (0xFF1F, '3', '?'),
- (0xFF20, '3', '@'),
- (0xFF21, 'M', 'a'),
- (0xFF22, 'M', 'b'),
- (0xFF23, 'M', 'c'),
- (0xFF24, 'M', 'd'),
- (0xFF25, 'M', 'e'),
- (0xFF26, 'M', 'f'),
- (0xFF27, 'M', 'g'),
- (0xFF28, 'M', 'h'),
- (0xFF29, 'M', 'i'),
- (0xFF2A, 'M', 'j'),
- (0xFF2B, 'M', 'k'),
- (0xFF2C, 'M', 'l'),
- (0xFF2D, 'M', 'm'),
- (0xFF2E, 'M', 'n'),
- (0xFF2F, 'M', 'o'),
- (0xFF30, 'M', 'p'),
- (0xFF31, 'M', 'q'),
- (0xFF32, 'M', 'r'),
- (0xFF33, 'M', 's'),
- (0xFF34, 'M', 't'),
- (0xFF35, 'M', 'u'),
- (0xFF36, 'M', 'v'),
- (0xFF37, 'M', 'w'),
- (0xFF38, 'M', 'x'),
- (0xFF39, 'M', 'y'),
- (0xFF3A, 'M', 'z'),
- (0xFF3B, '3', '['),
- (0xFF3C, '3', '\\'),
- (0xFF3D, '3', ']'),
- (0xFF3E, '3', '^'),
- (0xFF3F, '3', '_'),
- (0xFF40, '3', '`'),
- (0xFF41, 'M', 'a'),
- (0xFF42, 'M', 'b'),
- (0xFF43, 'M', 'c'),
- (0xFF44, 'M', 'd'),
- (0xFF45, 'M', 'e'),
- (0xFF46, 'M', 'f'),
- (0xFF47, 'M', 'g'),
- (0xFF48, 'M', 'h'),
- (0xFF49, 'M', 'i'),
- (0xFF4A, 'M', 'j'),
- (0xFF4B, 'M', 'k'),
- (0xFF4C, 'M', 'l'),
- (0xFF4D, 'M', 'm'),
- (0xFF4E, 'M', 'n'),
- (0xFF4F, 'M', 'o'),
- (0xFF50, 'M', 'p'),
- (0xFF51, 'M', 'q'),
- (0xFF52, 'M', 'r'),
- (0xFF53, 'M', 's'),
- (0xFF54, 'M', 't'),
- (0xFF55, 'M', 'u'),
- (0xFF56, 'M', 'v'),
- (0xFF57, 'M', 'w'),
- (0xFF58, 'M', 'x'),
- (0xFF59, 'M', 'y'),
- (0xFF5A, 'M', 'z'),
- (0xFF5B, '3', '{'),
- (0xFF5C, '3', '|'),
- (0xFF5D, '3', '}'),
- (0xFF5E, '3', '~'),
- (0xFF5F, 'M', '⦅'),
- (0xFF60, 'M', '⦆'),
- (0xFF61, 'M', '.'),
- (0xFF62, 'M', '「'),
- (0xFF63, 'M', '」'),
- (0xFF64, 'M', '、'),
- (0xFF65, 'M', '・'),
- (0xFF66, 'M', 'ヲ'),
- (0xFF67, 'M', 'ァ'),
- (0xFF68, 'M', 'ィ'),
- (0xFF69, 'M', 'ゥ'),
- (0xFF6A, 'M', 'ェ'),
- (0xFF6B, 'M', 'ォ'),
- (0xFF6C, 'M', 'ャ'),
- (0xFF6D, 'M', 'ュ'),
- (0xFF6E, 'M', 'ョ'),
- (0xFF6F, 'M', 'ッ'),
- (0xFF70, 'M', 'ー'),
- (0xFF71, 'M', 'ア'),
- (0xFF72, 'M', 'イ'),
- (0xFF73, 'M', 'ウ'),
- (0xFF74, 'M', 'エ'),
- (0xFF75, 'M', 'オ'),
- (0xFF76, 'M', 'カ'),
- (0xFF77, 'M', 'キ'),
- (0xFF78, 'M', 'ク'),
- (0xFF79, 'M', 'ケ'),
- (0xFF7A, 'M', 'コ'),
- (0xFF7B, 'M', 'サ'),
- (0xFF7C, 'M', 'シ'),
- (0xFF7D, 'M', 'ス'),
- (0xFF7E, 'M', 'セ'),
+ (0xFF4F, 'M', u'o'),
+ (0xFF50, 'M', u'p'),
+ (0xFF51, 'M', u'q'),
+ (0xFF52, 'M', u'r'),
+ (0xFF53, 'M', u's'),
+ (0xFF54, 'M', u't'),
+ (0xFF55, 'M', u'u'),
+ (0xFF56, 'M', u'v'),
+ (0xFF57, 'M', u'w'),
+ (0xFF58, 'M', u'x'),
+ (0xFF59, 'M', u'y'),
+ (0xFF5A, 'M', u'z'),
+ (0xFF5B, '3', u'{'),
+ (0xFF5C, '3', u'|'),
+ (0xFF5D, '3', u'}'),
+ (0xFF5E, '3', u'~'),
+ (0xFF5F, 'M', u'⦅'),
+ (0xFF60, 'M', u'⦆'),
+ (0xFF61, 'M', u'.'),
+ (0xFF62, 'M', u'「'),
+ (0xFF63, 'M', u'」'),
+ (0xFF64, 'M', u'、'),
+ (0xFF65, 'M', u'・'),
+ (0xFF66, 'M', u'ヲ'),
+ (0xFF67, 'M', u'ァ'),
+ (0xFF68, 'M', u'ィ'),
+ (0xFF69, 'M', u'ゥ'),
+ (0xFF6A, 'M', u'ェ'),
+ (0xFF6B, 'M', u'ォ'),
+ (0xFF6C, 'M', u'ャ'),
+ (0xFF6D, 'M', u'ュ'),
+ (0xFF6E, 'M', u'ョ'),
+ (0xFF6F, 'M', u'ッ'),
+ (0xFF70, 'M', u'ー'),
+ (0xFF71, 'M', u'ア'),
+ (0xFF72, 'M', u'イ'),
+ (0xFF73, 'M', u'ウ'),
+ (0xFF74, 'M', u'エ'),
+ (0xFF75, 'M', u'オ'),
+ (0xFF76, 'M', u'カ'),
+ (0xFF77, 'M', u'キ'),
+ (0xFF78, 'M', u'ク'),
+ (0xFF79, 'M', u'ケ'),
+ (0xFF7A, 'M', u'コ'),
+ (0xFF7B, 'M', u'サ'),
+ (0xFF7C, 'M', u'シ'),
+ (0xFF7D, 'M', u'ス'),
+ (0xFF7E, 'M', u'セ'),
+ (0xFF7F, 'M', u'ソ'),
+ (0xFF80, 'M', u'タ'),
+ (0xFF81, 'M', u'チ'),
+ (0xFF82, 'M', u'ツ'),
+ (0xFF83, 'M', u'テ'),
+ (0xFF84, 'M', u'ト'),
+ (0xFF85, 'M', u'ナ'),
+ (0xFF86, 'M', u'ニ'),
+ (0xFF87, 'M', u'ヌ'),
+ (0xFF88, 'M', u'ネ'),
+ (0xFF89, 'M', u'ノ'),
+ (0xFF8A, 'M', u'ハ'),
+ (0xFF8B, 'M', u'ヒ'),
+ (0xFF8C, 'M', u'フ'),
+ (0xFF8D, 'M', u'ヘ'),
+ (0xFF8E, 'M', u'ホ'),
+ (0xFF8F, 'M', u'マ'),
+ (0xFF90, 'M', u'ミ'),
+ (0xFF91, 'M', u'ム'),
+ (0xFF92, 'M', u'メ'),
+ (0xFF93, 'M', u'モ'),
+ (0xFF94, 'M', u'ヤ'),
+ (0xFF95, 'M', u'ユ'),
+ (0xFF96, 'M', u'ヨ'),
+ (0xFF97, 'M', u'ラ'),
+ (0xFF98, 'M', u'リ'),
+ (0xFF99, 'M', u'ル'),
+ (0xFF9A, 'M', u'レ'),
+ (0xFF9B, 'M', u'ロ'),
+ (0xFF9C, 'M', u'ワ'),
+ (0xFF9D, 'M', u'ン'),
+ (0xFF9E, 'M', u'゙'),
+ (0xFF9F, 'M', u'゚'),
+ (0xFFA0, 'X'),
+ (0xFFA1, 'M', u'ᄀ'),
+ (0xFFA2, 'M', u'ᄁ'),
+ (0xFFA3, 'M', u'ᆪ'),
+ (0xFFA4, 'M', u'ᄂ'),
+ (0xFFA5, 'M', u'ᆬ'),
+ (0xFFA6, 'M', u'ᆭ'),
+ (0xFFA7, 'M', u'ᄃ'),
+ (0xFFA8, 'M', u'ᄄ'),
+ (0xFFA9, 'M', u'ᄅ'),
+ (0xFFAA, 'M', u'ᆰ'),
+ (0xFFAB, 'M', u'ᆱ'),
+ (0xFFAC, 'M', u'ᆲ'),
+ (0xFFAD, 'M', u'ᆳ'),
+ (0xFFAE, 'M', u'ᆴ'),
+ (0xFFAF, 'M', u'ᆵ'),
+ (0xFFB0, 'M', u'ᄚ'),
+ (0xFFB1, 'M', u'ᄆ'),
+ (0xFFB2, 'M', u'ᄇ'),
]
-def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_52():
return [
- (0xFF7F, 'M', 'ソ'),
- (0xFF80, 'M', 'タ'),
- (0xFF81, 'M', 'チ'),
- (0xFF82, 'M', 'ツ'),
- (0xFF83, 'M', 'テ'),
- (0xFF84, 'M', 'ト'),
- (0xFF85, 'M', 'ナ'),
- (0xFF86, 'M', 'ニ'),
- (0xFF87, 'M', 'ヌ'),
- (0xFF88, 'M', 'ネ'),
- (0xFF89, 'M', 'ノ'),
- (0xFF8A, 'M', 'ハ'),
- (0xFF8B, 'M', 'ヒ'),
- (0xFF8C, 'M', 'フ'),
- (0xFF8D, 'M', 'ヘ'),
- (0xFF8E, 'M', 'ホ'),
- (0xFF8F, 'M', 'マ'),
- (0xFF90, 'M', 'ミ'),
- (0xFF91, 'M', 'ム'),
- (0xFF92, 'M', 'メ'),
- (0xFF93, 'M', 'モ'),
- (0xFF94, 'M', 'ヤ'),
- (0xFF95, 'M', 'ユ'),
- (0xFF96, 'M', 'ヨ'),
- (0xFF97, 'M', 'ラ'),
- (0xFF98, 'M', 'リ'),
- (0xFF99, 'M', 'ル'),
- (0xFF9A, 'M', 'レ'),
- (0xFF9B, 'M', 'ロ'),
- (0xFF9C, 'M', 'ワ'),
- (0xFF9D, 'M', 'ン'),
- (0xFF9E, 'M', '゙'),
- (0xFF9F, 'M', '゚'),
- (0xFFA0, 'X'),
- (0xFFA1, 'M', 'ᄀ'),
- (0xFFA2, 'M', 'ᄁ'),
- (0xFFA3, 'M', 'ᆪ'),
- (0xFFA4, 'M', 'ᄂ'),
- (0xFFA5, 'M', 'ᆬ'),
- (0xFFA6, 'M', 'ᆭ'),
- (0xFFA7, 'M', 'ᄃ'),
- (0xFFA8, 'M', 'ᄄ'),
- (0xFFA9, 'M', 'ᄅ'),
- (0xFFAA, 'M', 'ᆰ'),
- (0xFFAB, 'M', 'ᆱ'),
- (0xFFAC, 'M', 'ᆲ'),
- (0xFFAD, 'M', 'ᆳ'),
- (0xFFAE, 'M', 'ᆴ'),
- (0xFFAF, 'M', 'ᆵ'),
- (0xFFB0, 'M', 'ᄚ'),
- (0xFFB1, 'M', 'ᄆ'),
- (0xFFB2, 'M', 'ᄇ'),
- (0xFFB3, 'M', 'ᄈ'),
- (0xFFB4, 'M', 'ᄡ'),
- (0xFFB5, 'M', 'ᄉ'),
- (0xFFB6, 'M', 'ᄊ'),
- (0xFFB7, 'M', 'ᄋ'),
- (0xFFB8, 'M', 'ᄌ'),
- (0xFFB9, 'M', 'ᄍ'),
- (0xFFBA, 'M', 'ᄎ'),
- (0xFFBB, 'M', 'ᄏ'),
- (0xFFBC, 'M', 'ᄐ'),
- (0xFFBD, 'M', 'ᄑ'),
- (0xFFBE, 'M', 'ᄒ'),
+ (0xFFB3, 'M', u'ᄈ'),
+ (0xFFB4, 'M', u'ᄡ'),
+ (0xFFB5, 'M', u'ᄉ'),
+ (0xFFB6, 'M', u'ᄊ'),
+ (0xFFB7, 'M', u'ᄋ'),
+ (0xFFB8, 'M', u'ᄌ'),
+ (0xFFB9, 'M', u'ᄍ'),
+ (0xFFBA, 'M', u'ᄎ'),
+ (0xFFBB, 'M', u'ᄏ'),
+ (0xFFBC, 'M', u'ᄐ'),
+ (0xFFBD, 'M', u'ᄑ'),
+ (0xFFBE, 'M', u'ᄒ'),
(0xFFBF, 'X'),
- (0xFFC2, 'M', 'ᅡ'),
- (0xFFC3, 'M', 'ᅢ'),
- (0xFFC4, 'M', 'ᅣ'),
- (0xFFC5, 'M', 'ᅤ'),
- (0xFFC6, 'M', 'ᅥ'),
- (0xFFC7, 'M', 'ᅦ'),
+ (0xFFC2, 'M', u'ᅡ'),
+ (0xFFC3, 'M', u'ᅢ'),
+ (0xFFC4, 'M', u'ᅣ'),
+ (0xFFC5, 'M', u'ᅤ'),
+ (0xFFC6, 'M', u'ᅥ'),
+ (0xFFC7, 'M', u'ᅦ'),
(0xFFC8, 'X'),
- (0xFFCA, 'M', 'ᅧ'),
- (0xFFCB, 'M', 'ᅨ'),
- (0xFFCC, 'M', 'ᅩ'),
- (0xFFCD, 'M', 'ᅪ'),
- (0xFFCE, 'M', 'ᅫ'),
- (0xFFCF, 'M', 'ᅬ'),
+ (0xFFCA, 'M', u'ᅧ'),
+ (0xFFCB, 'M', u'ᅨ'),
+ (0xFFCC, 'M', u'ᅩ'),
+ (0xFFCD, 'M', u'ᅪ'),
+ (0xFFCE, 'M', u'ᅫ'),
+ (0xFFCF, 'M', u'ᅬ'),
(0xFFD0, 'X'),
- (0xFFD2, 'M', 'ᅭ'),
- (0xFFD3, 'M', 'ᅮ'),
- (0xFFD4, 'M', 'ᅯ'),
- (0xFFD5, 'M', 'ᅰ'),
- (0xFFD6, 'M', 'ᅱ'),
- (0xFFD7, 'M', 'ᅲ'),
+ (0xFFD2, 'M', u'ᅭ'),
+ (0xFFD3, 'M', u'ᅮ'),
+ (0xFFD4, 'M', u'ᅯ'),
+ (0xFFD5, 'M', u'ᅰ'),
+ (0xFFD6, 'M', u'ᅱ'),
+ (0xFFD7, 'M', u'ᅲ'),
(0xFFD8, 'X'),
- (0xFFDA, 'M', 'ᅳ'),
- (0xFFDB, 'M', 'ᅴ'),
- (0xFFDC, 'M', 'ᅵ'),
+ (0xFFDA, 'M', u'ᅳ'),
+ (0xFFDB, 'M', u'ᅴ'),
+ (0xFFDC, 'M', u'ᅵ'),
(0xFFDD, 'X'),
- (0xFFE0, 'M', '¢'),
- (0xFFE1, 'M', '£'),
- (0xFFE2, 'M', '¬'),
- (0xFFE3, '3', ' ̄'),
- (0xFFE4, 'M', '¦'),
- (0xFFE5, 'M', '¥'),
- (0xFFE6, 'M', '₩'),
+ (0xFFE0, 'M', u'¢'),
+ (0xFFE1, 'M', u'£'),
+ (0xFFE2, 'M', u'¬'),
+ (0xFFE3, '3', u' ̄'),
+ (0xFFE4, 'M', u'¦'),
+ (0xFFE5, 'M', u'¥'),
+ (0xFFE6, 'M', u'₩'),
(0xFFE7, 'X'),
- (0xFFE8, 'M', '│'),
- (0xFFE9, 'M', '←'),
- ]
-
-def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0xFFEA, 'M', '↑'),
- (0xFFEB, 'M', '→'),
- (0xFFEC, 'M', '↓'),
- (0xFFED, 'M', '■'),
- (0xFFEE, 'M', '○'),
+ (0xFFE8, 'M', u'│'),
+ (0xFFE9, 'M', u'←'),
+ (0xFFEA, 'M', u'↑'),
+ (0xFFEB, 'M', u'→'),
+ (0xFFEC, 'M', u'↓'),
+ (0xFFED, 'M', u'■'),
+ (0xFFEE, 'M', u'○'),
(0xFFEF, 'X'),
(0x10000, 'V'),
(0x1000C, 'X'),
@@ -5549,7 +5490,7 @@ def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x10137, 'V'),
(0x1018F, 'X'),
(0x10190, 'V'),
- (0x1019D, 'X'),
+ (0x1019C, 'X'),
(0x101A0, 'V'),
(0x101A1, 'X'),
(0x101D0, 'V'),
@@ -5572,90 +5513,90 @@ def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x103C4, 'X'),
(0x103C8, 'V'),
(0x103D6, 'X'),
- (0x10400, 'M', '𐐨'),
- (0x10401, 'M', '𐐩'),
- (0x10402, 'M', '𐐪'),
- (0x10403, 'M', '𐐫'),
- (0x10404, 'M', '𐐬'),
- (0x10405, 'M', '𐐭'),
- (0x10406, 'M', '𐐮'),
- (0x10407, 'M', '𐐯'),
- (0x10408, 'M', '𐐰'),
- (0x10409, 'M', '𐐱'),
- (0x1040A, 'M', '𐐲'),
- (0x1040B, 'M', '𐐳'),
- (0x1040C, 'M', '𐐴'),
- (0x1040D, 'M', '𐐵'),
- (0x1040E, 'M', '𐐶'),
- (0x1040F, 'M', '𐐷'),
- (0x10410, 'M', '𐐸'),
- (0x10411, 'M', '𐐹'),
- (0x10412, 'M', '𐐺'),
- (0x10413, 'M', '𐐻'),
- (0x10414, 'M', '𐐼'),
- (0x10415, 'M', '𐐽'),
- (0x10416, 'M', '𐐾'),
- (0x10417, 'M', '𐐿'),
- (0x10418, 'M', '𐑀'),
- (0x10419, 'M', '𐑁'),
- (0x1041A, 'M', '𐑂'),
- (0x1041B, 'M', '𐑃'),
- (0x1041C, 'M', '𐑄'),
- (0x1041D, 'M', '𐑅'),
- (0x1041E, 'M', '𐑆'),
- (0x1041F, 'M', '𐑇'),
- (0x10420, 'M', '𐑈'),
- (0x10421, 'M', '𐑉'),
- (0x10422, 'M', '𐑊'),
- (0x10423, 'M', '𐑋'),
- (0x10424, 'M', '𐑌'),
- (0x10425, 'M', '𐑍'),
- (0x10426, 'M', '𐑎'),
- (0x10427, 'M', '𐑏'),
+ (0x10400, 'M', u'𐐨'),
+ (0x10401, 'M', u'𐐩'),
+ ]
+
+def _seg_53():
+ return [
+ (0x10402, 'M', u'𐐪'),
+ (0x10403, 'M', u'𐐫'),
+ (0x10404, 'M', u'𐐬'),
+ (0x10405, 'M', u'𐐭'),
+ (0x10406, 'M', u'𐐮'),
+ (0x10407, 'M', u'𐐯'),
+ (0x10408, 'M', u'𐐰'),
+ (0x10409, 'M', u'𐐱'),
+ (0x1040A, 'M', u'𐐲'),
+ (0x1040B, 'M', u'𐐳'),
+ (0x1040C, 'M', u'𐐴'),
+ (0x1040D, 'M', u'𐐵'),
+ (0x1040E, 'M', u'𐐶'),
+ (0x1040F, 'M', u'𐐷'),
+ (0x10410, 'M', u'𐐸'),
+ (0x10411, 'M', u'𐐹'),
+ (0x10412, 'M', u'𐐺'),
+ (0x10413, 'M', u'𐐻'),
+ (0x10414, 'M', u'𐐼'),
+ (0x10415, 'M', u'𐐽'),
+ (0x10416, 'M', u'𐐾'),
+ (0x10417, 'M', u'𐐿'),
+ (0x10418, 'M', u'𐑀'),
+ (0x10419, 'M', u'𐑁'),
+ (0x1041A, 'M', u'𐑂'),
+ (0x1041B, 'M', u'𐑃'),
+ (0x1041C, 'M', u'𐑄'),
+ (0x1041D, 'M', u'𐑅'),
+ (0x1041E, 'M', u'𐑆'),
+ (0x1041F, 'M', u'𐑇'),
+ (0x10420, 'M', u'𐑈'),
+ (0x10421, 'M', u'𐑉'),
+ (0x10422, 'M', u'𐑊'),
+ (0x10423, 'M', u'𐑋'),
+ (0x10424, 'M', u'𐑌'),
+ (0x10425, 'M', u'𐑍'),
+ (0x10426, 'M', u'𐑎'),
+ (0x10427, 'M', u'𐑏'),
(0x10428, 'V'),
(0x1049E, 'X'),
(0x104A0, 'V'),
(0x104AA, 'X'),
- (0x104B0, 'M', '𐓘'),
- (0x104B1, 'M', '𐓙'),
- (0x104B2, 'M', '𐓚'),
- (0x104B3, 'M', '𐓛'),
- (0x104B4, 'M', '𐓜'),
- (0x104B5, 'M', '𐓝'),
- ]
-
-def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x104B6, 'M', '𐓞'),
- (0x104B7, 'M', '𐓟'),
- (0x104B8, 'M', '𐓠'),
- (0x104B9, 'M', '𐓡'),
- (0x104BA, 'M', '𐓢'),
- (0x104BB, 'M', '𐓣'),
- (0x104BC, 'M', '𐓤'),
- (0x104BD, 'M', '𐓥'),
- (0x104BE, 'M', '𐓦'),
- (0x104BF, 'M', '𐓧'),
- (0x104C0, 'M', '𐓨'),
- (0x104C1, 'M', '𐓩'),
- (0x104C2, 'M', '𐓪'),
- (0x104C3, 'M', '𐓫'),
- (0x104C4, 'M', '𐓬'),
- (0x104C5, 'M', '𐓭'),
- (0x104C6, 'M', '𐓮'),
- (0x104C7, 'M', '𐓯'),
- (0x104C8, 'M', '𐓰'),
- (0x104C9, 'M', '𐓱'),
- (0x104CA, 'M', '𐓲'),
- (0x104CB, 'M', '𐓳'),
- (0x104CC, 'M', '𐓴'),
- (0x104CD, 'M', '𐓵'),
- (0x104CE, 'M', '𐓶'),
- (0x104CF, 'M', '𐓷'),
- (0x104D0, 'M', '𐓸'),
- (0x104D1, 'M', '𐓹'),
- (0x104D2, 'M', '𐓺'),
- (0x104D3, 'M', '𐓻'),
+ (0x104B0, 'M', u'𐓘'),
+ (0x104B1, 'M', u'𐓙'),
+ (0x104B2, 'M', u'𐓚'),
+ (0x104B3, 'M', u'𐓛'),
+ (0x104B4, 'M', u'𐓜'),
+ (0x104B5, 'M', u'𐓝'),
+ (0x104B6, 'M', u'𐓞'),
+ (0x104B7, 'M', u'𐓟'),
+ (0x104B8, 'M', u'𐓠'),
+ (0x104B9, 'M', u'𐓡'),
+ (0x104BA, 'M', u'𐓢'),
+ (0x104BB, 'M', u'𐓣'),
+ (0x104BC, 'M', u'𐓤'),
+ (0x104BD, 'M', u'𐓥'),
+ (0x104BE, 'M', u'𐓦'),
+ (0x104BF, 'M', u'𐓧'),
+ (0x104C0, 'M', u'𐓨'),
+ (0x104C1, 'M', u'𐓩'),
+ (0x104C2, 'M', u'𐓪'),
+ (0x104C3, 'M', u'𐓫'),
+ (0x104C4, 'M', u'𐓬'),
+ (0x104C5, 'M', u'𐓭'),
+ (0x104C6, 'M', u'𐓮'),
+ (0x104C7, 'M', u'𐓯'),
+ (0x104C8, 'M', u'𐓰'),
+ (0x104C9, 'M', u'𐓱'),
+ (0x104CA, 'M', u'𐓲'),
+ (0x104CB, 'M', u'𐓳'),
+ (0x104CC, 'M', u'𐓴'),
+ (0x104CD, 'M', u'𐓵'),
+ (0x104CE, 'M', u'𐓶'),
+ (0x104CF, 'M', u'𐓷'),
+ (0x104D0, 'M', u'𐓸'),
+ (0x104D1, 'M', u'𐓹'),
+ (0x104D2, 'M', u'𐓺'),
+ (0x104D3, 'M', u'𐓻'),
(0x104D4, 'X'),
(0x104D8, 'V'),
(0x104FC, 'X'),
@@ -5664,123 +5605,13 @@ def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x10530, 'V'),
(0x10564, 'X'),
(0x1056F, 'V'),
- (0x10570, 'M', '𐖗'),
- (0x10571, 'M', '𐖘'),
- (0x10572, 'M', '𐖙'),
- (0x10573, 'M', '𐖚'),
- (0x10574, 'M', '𐖛'),
- (0x10575, 'M', '𐖜'),
- (0x10576, 'M', '𐖝'),
- (0x10577, 'M', '𐖞'),
- (0x10578, 'M', '𐖟'),
- (0x10579, 'M', '𐖠'),
- (0x1057A, 'M', '𐖡'),
- (0x1057B, 'X'),
- (0x1057C, 'M', '𐖣'),
- (0x1057D, 'M', '𐖤'),
- (0x1057E, 'M', '𐖥'),
- (0x1057F, 'M', '𐖦'),
- (0x10580, 'M', '𐖧'),
- (0x10581, 'M', '𐖨'),
- (0x10582, 'M', '𐖩'),
- (0x10583, 'M', '𐖪'),
- (0x10584, 'M', '𐖫'),
- (0x10585, 'M', '𐖬'),
- (0x10586, 'M', '𐖭'),
- (0x10587, 'M', '𐖮'),
- (0x10588, 'M', '𐖯'),
- (0x10589, 'M', '𐖰'),
- (0x1058A, 'M', '𐖱'),
- (0x1058B, 'X'),
- (0x1058C, 'M', '𐖳'),
- (0x1058D, 'M', '𐖴'),
- (0x1058E, 'M', '𐖵'),
- (0x1058F, 'M', '𐖶'),
- (0x10590, 'M', '𐖷'),
- (0x10591, 'M', '𐖸'),
- (0x10592, 'M', '𐖹'),
- (0x10593, 'X'),
- (0x10594, 'M', '𐖻'),
- (0x10595, 'M', '𐖼'),
- (0x10596, 'X'),
- (0x10597, 'V'),
- (0x105A2, 'X'),
- (0x105A3, 'V'),
- (0x105B2, 'X'),
- (0x105B3, 'V'),
- (0x105BA, 'X'),
- (0x105BB, 'V'),
- (0x105BD, 'X'),
+ (0x10570, 'X'),
(0x10600, 'V'),
(0x10737, 'X'),
(0x10740, 'V'),
(0x10756, 'X'),
(0x10760, 'V'),
(0x10768, 'X'),
- (0x10780, 'V'),
- (0x10781, 'M', 'ː'),
- (0x10782, 'M', 'ˑ'),
- (0x10783, 'M', 'æ'),
- (0x10784, 'M', 'ʙ'),
- (0x10785, 'M', 'ɓ'),
- (0x10786, 'X'),
- (0x10787, 'M', 'ʣ'),
- (0x10788, 'M', 'ꭦ'),
- ]
-
-def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x10789, 'M', 'ʥ'),
- (0x1078A, 'M', 'ʤ'),
- (0x1078B, 'M', 'ɖ'),
- (0x1078C, 'M', 'ɗ'),
- (0x1078D, 'M', 'ᶑ'),
- (0x1078E, 'M', 'ɘ'),
- (0x1078F, 'M', 'ɞ'),
- (0x10790, 'M', 'ʩ'),
- (0x10791, 'M', 'ɤ'),
- (0x10792, 'M', 'ɢ'),
- (0x10793, 'M', 'ɠ'),
- (0x10794, 'M', 'ʛ'),
- (0x10795, 'M', 'ħ'),
- (0x10796, 'M', 'ʜ'),
- (0x10797, 'M', 'ɧ'),
- (0x10798, 'M', 'ʄ'),
- (0x10799, 'M', 'ʪ'),
- (0x1079A, 'M', 'ʫ'),
- (0x1079B, 'M', 'ɬ'),
- (0x1079C, 'M', '𝼄'),
- (0x1079D, 'M', 'ꞎ'),
- (0x1079E, 'M', 'ɮ'),
- (0x1079F, 'M', '𝼅'),
- (0x107A0, 'M', 'ʎ'),
- (0x107A1, 'M', '𝼆'),
- (0x107A2, 'M', 'ø'),
- (0x107A3, 'M', 'ɶ'),
- (0x107A4, 'M', 'ɷ'),
- (0x107A5, 'M', 'q'),
- (0x107A6, 'M', 'ɺ'),
- (0x107A7, 'M', '𝼈'),
- (0x107A8, 'M', 'ɽ'),
- (0x107A9, 'M', 'ɾ'),
- (0x107AA, 'M', 'ʀ'),
- (0x107AB, 'M', 'ʨ'),
- (0x107AC, 'M', 'ʦ'),
- (0x107AD, 'M', 'ꭧ'),
- (0x107AE, 'M', 'ʧ'),
- (0x107AF, 'M', 'ʈ'),
- (0x107B0, 'M', 'ⱱ'),
- (0x107B1, 'X'),
- (0x107B2, 'M', 'ʏ'),
- (0x107B3, 'M', 'ʡ'),
- (0x107B4, 'M', 'ʢ'),
- (0x107B5, 'M', 'ʘ'),
- (0x107B6, 'M', 'ǀ'),
- (0x107B7, 'M', 'ǁ'),
- (0x107B8, 'M', 'ǂ'),
- (0x107B9, 'M', '𝼊'),
- (0x107BA, 'M', '𝼞'),
- (0x107BB, 'X'),
(0x10800, 'V'),
(0x10806, 'X'),
(0x10808, 'V'),
@@ -5788,6 +5619,10 @@ def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x1080A, 'V'),
(0x10836, 'X'),
(0x10837, 'V'),
+ ]
+
+def _seg_54():
+ return [
(0x10839, 'X'),
(0x1083C, 'V'),
(0x1083D, 'X'),
@@ -5830,10 +5665,6 @@ def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x10A60, 'V'),
(0x10AA0, 'X'),
(0x10AC0, 'V'),
- ]
-
-def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
(0x10AE7, 'X'),
(0x10AEB, 'V'),
(0x10AF7, 'X'),
@@ -5851,57 +5682,61 @@ def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x10BB0, 'X'),
(0x10C00, 'V'),
(0x10C49, 'X'),
- (0x10C80, 'M', '𐳀'),
- (0x10C81, 'M', '𐳁'),
- (0x10C82, 'M', '𐳂'),
- (0x10C83, 'M', '𐳃'),
- (0x10C84, 'M', '𐳄'),
- (0x10C85, 'M', '𐳅'),
- (0x10C86, 'M', '𐳆'),
- (0x10C87, 'M', '𐳇'),
- (0x10C88, 'M', '𐳈'),
- (0x10C89, 'M', '𐳉'),
- (0x10C8A, 'M', '𐳊'),
- (0x10C8B, 'M', '𐳋'),
- (0x10C8C, 'M', '𐳌'),
- (0x10C8D, 'M', '𐳍'),
- (0x10C8E, 'M', '𐳎'),
- (0x10C8F, 'M', '𐳏'),
- (0x10C90, 'M', '𐳐'),
- (0x10C91, 'M', '𐳑'),
- (0x10C92, 'M', '𐳒'),
- (0x10C93, 'M', '𐳓'),
- (0x10C94, 'M', '𐳔'),
- (0x10C95, 'M', '𐳕'),
- (0x10C96, 'M', '𐳖'),
- (0x10C97, 'M', '𐳗'),
- (0x10C98, 'M', '𐳘'),
- (0x10C99, 'M', '𐳙'),
- (0x10C9A, 'M', '𐳚'),
- (0x10C9B, 'M', '𐳛'),
- (0x10C9C, 'M', '𐳜'),
- (0x10C9D, 'M', '𐳝'),
- (0x10C9E, 'M', '𐳞'),
- (0x10C9F, 'M', '𐳟'),
- (0x10CA0, 'M', '𐳠'),
- (0x10CA1, 'M', '𐳡'),
- (0x10CA2, 'M', '𐳢'),
- (0x10CA3, 'M', '𐳣'),
- (0x10CA4, 'M', '𐳤'),
- (0x10CA5, 'M', '𐳥'),
- (0x10CA6, 'M', '𐳦'),
- (0x10CA7, 'M', '𐳧'),
- (0x10CA8, 'M', '𐳨'),
- (0x10CA9, 'M', '𐳩'),
- (0x10CAA, 'M', '𐳪'),
- (0x10CAB, 'M', '𐳫'),
- (0x10CAC, 'M', '𐳬'),
- (0x10CAD, 'M', '𐳭'),
- (0x10CAE, 'M', '𐳮'),
- (0x10CAF, 'M', '𐳯'),
- (0x10CB0, 'M', '𐳰'),
- (0x10CB1, 'M', '𐳱'),
- (0x10CB2, 'M', '𐳲'),
+ (0x10C80, 'M', u'𐳀'),
+ (0x10C81, 'M', u'𐳁'),
+ (0x10C82, 'M', u'𐳂'),
+ (0x10C83, 'M', u'𐳃'),
+ (0x10C84, 'M', u'𐳄'),
+ (0x10C85, 'M', u'𐳅'),
+ (0x10C86, 'M', u'𐳆'),
+ (0x10C87, 'M', u'𐳇'),
+ (0x10C88, 'M', u'𐳈'),
+ (0x10C89, 'M', u'𐳉'),
+ (0x10C8A, 'M', u'𐳊'),
+ (0x10C8B, 'M', u'𐳋'),
+ (0x10C8C, 'M', u'𐳌'),
+ (0x10C8D, 'M', u'𐳍'),
+ (0x10C8E, 'M', u'𐳎'),
+ (0x10C8F, 'M', u'𐳏'),
+ (0x10C90, 'M', u'𐳐'),
+ (0x10C91, 'M', u'𐳑'),
+ (0x10C92, 'M', u'𐳒'),
+ (0x10C93, 'M', u'𐳓'),
+ (0x10C94, 'M', u'𐳔'),
+ (0x10C95, 'M', u'𐳕'),
+ (0x10C96, 'M', u'𐳖'),
+ (0x10C97, 'M', u'𐳗'),
+ (0x10C98, 'M', u'𐳘'),
+ (0x10C99, 'M', u'𐳙'),
+ (0x10C9A, 'M', u'𐳚'),
+ (0x10C9B, 'M', u'𐳛'),
+ (0x10C9C, 'M', u'𐳜'),
+ (0x10C9D, 'M', u'𐳝'),
+ (0x10C9E, 'M', u'𐳞'),
+ (0x10C9F, 'M', u'𐳟'),
+ (0x10CA0, 'M', u'𐳠'),
+ (0x10CA1, 'M', u'𐳡'),
+ (0x10CA2, 'M', u'𐳢'),
+ (0x10CA3, 'M', u'𐳣'),
+ (0x10CA4, 'M', u'𐳤'),
+ (0x10CA5, 'M', u'𐳥'),
+ (0x10CA6, 'M', u'𐳦'),
+ (0x10CA7, 'M', u'𐳧'),
+ (0x10CA8, 'M', u'𐳨'),
+ ]
+
+def _seg_55():
+ return [
+ (0x10CA9, 'M', u'𐳩'),
+ (0x10CAA, 'M', u'𐳪'),
+ (0x10CAB, 'M', u'𐳫'),
+ (0x10CAC, 'M', u'𐳬'),
+ (0x10CAD, 'M', u'𐳭'),
+ (0x10CAE, 'M', u'𐳮'),
+ (0x10CAF, 'M', u'𐳯'),
+ (0x10CB0, 'M', u'𐳰'),
+ (0x10CB1, 'M', u'𐳱'),
+ (0x10CB2, 'M', u'𐳲'),
(0x10CB3, 'X'),
(0x10CC0, 'V'),
(0x10CF3, 'X'),
@@ -5911,34 +5746,18 @@ def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x10D3A, 'X'),
(0x10E60, 'V'),
(0x10E7F, 'X'),
- (0x10E80, 'V'),
- (0x10EAA, 'X'),
- (0x10EAB, 'V'),
- (0x10EAE, 'X'),
- (0x10EB0, 'V'),
- (0x10EB2, 'X'),
(0x10F00, 'V'),
(0x10F28, 'X'),
(0x10F30, 'V'),
(0x10F5A, 'X'),
- (0x10F70, 'V'),
- (0x10F8A, 'X'),
- (0x10FB0, 'V'),
- (0x10FCC, 'X'),
- (0x10FE0, 'V'),
- (0x10FF7, 'X'),
(0x11000, 'V'),
(0x1104E, 'X'),
(0x11052, 'V'),
- (0x11076, 'X'),
+ (0x11070, 'X'),
(0x1107F, 'V'),
(0x110BD, 'X'),
(0x110BE, 'V'),
- ]
-
-def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x110C3, 'X'),
+ (0x110C2, 'X'),
(0x110D0, 'V'),
(0x110E9, 'X'),
(0x110F0, 'V'),
@@ -5946,10 +5765,12 @@ def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x11100, 'V'),
(0x11135, 'X'),
(0x11136, 'V'),
- (0x11148, 'X'),
+ (0x11147, 'X'),
(0x11150, 'V'),
(0x11177, 'X'),
(0x11180, 'V'),
+ (0x111CE, 'X'),
+ (0x111D0, 'V'),
(0x111E0, 'X'),
(0x111E1, 'V'),
(0x111F5, 'X'),
@@ -6002,9 +5823,15 @@ def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x11370, 'V'),
(0x11375, 'X'),
(0x11400, 'V'),
+ (0x1145A, 'X'),
+ (0x1145B, 'V'),
(0x1145C, 'X'),
(0x1145D, 'V'),
- (0x11462, 'X'),
+ ]
+
+def _seg_56():
+ return [
+ (0x1145F, 'X'),
(0x11480, 'V'),
(0x114C8, 'X'),
(0x114D0, 'V'),
@@ -6020,7 +5847,7 @@ def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x11660, 'V'),
(0x1166D, 'X'),
(0x11680, 'V'),
- (0x116BA, 'X'),
+ (0x116B8, 'X'),
(0x116C0, 'V'),
(0x116CA, 'X'),
(0x11700, 'V'),
@@ -6028,74 +5855,52 @@ def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x1171D, 'V'),
(0x1172C, 'X'),
(0x11730, 'V'),
- (0x11747, 'X'),
+ (0x11740, 'X'),
(0x11800, 'V'),
(0x1183C, 'X'),
- (0x118A0, 'M', '𑣀'),
- (0x118A1, 'M', '𑣁'),
- (0x118A2, 'M', '𑣂'),
- (0x118A3, 'M', '𑣃'),
- (0x118A4, 'M', '𑣄'),
- (0x118A5, 'M', '𑣅'),
- (0x118A6, 'M', '𑣆'),
- ]
-
-def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x118A7, 'M', '𑣇'),
- (0x118A8, 'M', '𑣈'),
- (0x118A9, 'M', '𑣉'),
- (0x118AA, 'M', '𑣊'),
- (0x118AB, 'M', '𑣋'),
- (0x118AC, 'M', '𑣌'),
- (0x118AD, 'M', '𑣍'),
- (0x118AE, 'M', '𑣎'),
- (0x118AF, 'M', '𑣏'),
- (0x118B0, 'M', '𑣐'),
- (0x118B1, 'M', '𑣑'),
- (0x118B2, 'M', '𑣒'),
- (0x118B3, 'M', '𑣓'),
- (0x118B4, 'M', '𑣔'),
- (0x118B5, 'M', '𑣕'),
- (0x118B6, 'M', '𑣖'),
- (0x118B7, 'M', '𑣗'),
- (0x118B8, 'M', '𑣘'),
- (0x118B9, 'M', '𑣙'),
- (0x118BA, 'M', '𑣚'),
- (0x118BB, 'M', '𑣛'),
- (0x118BC, 'M', '𑣜'),
- (0x118BD, 'M', '𑣝'),
- (0x118BE, 'M', '𑣞'),
- (0x118BF, 'M', '𑣟'),
+ (0x118A0, 'M', u'𑣀'),
+ (0x118A1, 'M', u'𑣁'),
+ (0x118A2, 'M', u'𑣂'),
+ (0x118A3, 'M', u'𑣃'),
+ (0x118A4, 'M', u'𑣄'),
+ (0x118A5, 'M', u'𑣅'),
+ (0x118A6, 'M', u'𑣆'),
+ (0x118A7, 'M', u'𑣇'),
+ (0x118A8, 'M', u'𑣈'),
+ (0x118A9, 'M', u'𑣉'),
+ (0x118AA, 'M', u'𑣊'),
+ (0x118AB, 'M', u'𑣋'),
+ (0x118AC, 'M', u'𑣌'),
+ (0x118AD, 'M', u'𑣍'),
+ (0x118AE, 'M', u'𑣎'),
+ (0x118AF, 'M', u'𑣏'),
+ (0x118B0, 'M', u'𑣐'),
+ (0x118B1, 'M', u'𑣑'),
+ (0x118B2, 'M', u'𑣒'),
+ (0x118B3, 'M', u'𑣓'),
+ (0x118B4, 'M', u'𑣔'),
+ (0x118B5, 'M', u'𑣕'),
+ (0x118B6, 'M', u'𑣖'),
+ (0x118B7, 'M', u'𑣗'),
+ (0x118B8, 'M', u'𑣘'),
+ (0x118B9, 'M', u'𑣙'),
+ (0x118BA, 'M', u'𑣚'),
+ (0x118BB, 'M', u'𑣛'),
+ (0x118BC, 'M', u'𑣜'),
+ (0x118BD, 'M', u'𑣝'),
+ (0x118BE, 'M', u'𑣞'),
+ (0x118BF, 'M', u'𑣟'),
(0x118C0, 'V'),
(0x118F3, 'X'),
(0x118FF, 'V'),
- (0x11907, 'X'),
- (0x11909, 'V'),
- (0x1190A, 'X'),
- (0x1190C, 'V'),
- (0x11914, 'X'),
- (0x11915, 'V'),
- (0x11917, 'X'),
- (0x11918, 'V'),
- (0x11936, 'X'),
- (0x11937, 'V'),
- (0x11939, 'X'),
- (0x1193B, 'V'),
- (0x11947, 'X'),
- (0x11950, 'V'),
- (0x1195A, 'X'),
- (0x119A0, 'V'),
- (0x119A8, 'X'),
- (0x119AA, 'V'),
- (0x119D8, 'X'),
- (0x119DA, 'V'),
- (0x119E5, 'X'),
+ (0x11900, 'X'),
(0x11A00, 'V'),
(0x11A48, 'X'),
(0x11A50, 'V'),
+ (0x11A84, 'X'),
+ (0x11A86, 'V'),
(0x11AA3, 'X'),
- (0x11AB0, 'V'),
+ (0x11AC0, 'V'),
(0x11AF9, 'X'),
(0x11C00, 'V'),
(0x11C09, 'X'),
@@ -6126,6 +5931,10 @@ def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x11D50, 'V'),
(0x11D5A, 'X'),
(0x11D60, 'V'),
+ ]
+
+def _seg_57():
+ return [
(0x11D66, 'X'),
(0x11D67, 'V'),
(0x11D69, 'X'),
@@ -6139,15 +5948,7 @@ def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x11DAA, 'X'),
(0x11EE0, 'V'),
(0x11EF9, 'X'),
- (0x11FB0, 'V'),
- (0x11FB1, 'X'),
- (0x11FC0, 'V'),
- ]
-
-def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x11FF2, 'X'),
- (0x11FFF, 'V'),
+ (0x12000, 'V'),
(0x1239A, 'X'),
(0x12400, 'V'),
(0x1246F, 'X'),
@@ -6155,8 +5956,6 @@ def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x12475, 'X'),
(0x12480, 'V'),
(0x12544, 'X'),
- (0x12F90, 'V'),
- (0x12FF3, 'X'),
(0x13000, 'V'),
(0x1342F, 'X'),
(0x14400, 'V'),
@@ -6168,9 +5967,7 @@ def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x16A60, 'V'),
(0x16A6A, 'X'),
(0x16A6E, 'V'),
- (0x16ABF, 'X'),
- (0x16AC0, 'V'),
- (0x16ACA, 'X'),
+ (0x16A70, 'X'),
(0x16AD0, 'V'),
(0x16AEE, 'X'),
(0x16AF0, 'V'),
@@ -6185,72 +5982,22 @@ def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x16B78, 'X'),
(0x16B7D, 'V'),
(0x16B90, 'X'),
- (0x16E40, 'M', '𖹠'),
- (0x16E41, 'M', '𖹡'),
- (0x16E42, 'M', '𖹢'),
- (0x16E43, 'M', '𖹣'),
- (0x16E44, 'M', '𖹤'),
- (0x16E45, 'M', '𖹥'),
- (0x16E46, 'M', '𖹦'),
- (0x16E47, 'M', '𖹧'),
- (0x16E48, 'M', '𖹨'),
- (0x16E49, 'M', '𖹩'),
- (0x16E4A, 'M', '𖹪'),
- (0x16E4B, 'M', '𖹫'),
- (0x16E4C, 'M', '𖹬'),
- (0x16E4D, 'M', '𖹭'),
- (0x16E4E, 'M', '𖹮'),
- (0x16E4F, 'M', '𖹯'),
- (0x16E50, 'M', '𖹰'),
- (0x16E51, 'M', '𖹱'),
- (0x16E52, 'M', '𖹲'),
- (0x16E53, 'M', '𖹳'),
- (0x16E54, 'M', '𖹴'),
- (0x16E55, 'M', '𖹵'),
- (0x16E56, 'M', '𖹶'),
- (0x16E57, 'M', '𖹷'),
- (0x16E58, 'M', '𖹸'),
- (0x16E59, 'M', '𖹹'),
- (0x16E5A, 'M', '𖹺'),
- (0x16E5B, 'M', '𖹻'),
- (0x16E5C, 'M', '𖹼'),
- (0x16E5D, 'M', '𖹽'),
- (0x16E5E, 'M', '𖹾'),
- (0x16E5F, 'M', '𖹿'),
(0x16E60, 'V'),
(0x16E9B, 'X'),
(0x16F00, 'V'),
- (0x16F4B, 'X'),
- (0x16F4F, 'V'),
- (0x16F88, 'X'),
+ (0x16F45, 'X'),
+ (0x16F50, 'V'),
+ (0x16F7F, 'X'),
(0x16F8F, 'V'),
(0x16FA0, 'X'),
(0x16FE0, 'V'),
- (0x16FE5, 'X'),
- (0x16FF0, 'V'),
- (0x16FF2, 'X'),
+ (0x16FE2, 'X'),
(0x17000, 'V'),
- (0x187F8, 'X'),
+ (0x187F2, 'X'),
(0x18800, 'V'),
- (0x18CD6, 'X'),
- (0x18D00, 'V'),
- (0x18D09, 'X'),
- (0x1AFF0, 'V'),
- (0x1AFF4, 'X'),
- (0x1AFF5, 'V'),
- (0x1AFFC, 'X'),
- (0x1AFFD, 'V'),
- (0x1AFFF, 'X'),
+ (0x18AF3, 'X'),
(0x1B000, 'V'),
- (0x1B123, 'X'),
- (0x1B150, 'V'),
- (0x1B153, 'X'),
- (0x1B164, 'V'),
- ]
-
-def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x1B168, 'X'),
+ (0x1B11F, 'X'),
(0x1B170, 'V'),
(0x1B2FC, 'X'),
(0x1BC00, 'V'),
@@ -6264,36 +6011,34 @@ def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x1BC9C, 'V'),
(0x1BCA0, 'I'),
(0x1BCA4, 'X'),
- (0x1CF00, 'V'),
- (0x1CF2E, 'X'),
- (0x1CF30, 'V'),
- (0x1CF47, 'X'),
- (0x1CF50, 'V'),
- (0x1CFC4, 'X'),
(0x1D000, 'V'),
(0x1D0F6, 'X'),
(0x1D100, 'V'),
(0x1D127, 'X'),
(0x1D129, 'V'),
- (0x1D15E, 'M', '𝅗𝅥'),
- (0x1D15F, 'M', '𝅘𝅥'),
- (0x1D160, 'M', '𝅘𝅥𝅮'),
- (0x1D161, 'M', '𝅘𝅥𝅯'),
- (0x1D162, 'M', '𝅘𝅥𝅰'),
- (0x1D163, 'M', '𝅘𝅥𝅱'),
- (0x1D164, 'M', '𝅘𝅥𝅲'),
+ (0x1D15E, 'M', u'𝅗𝅥'),
+ (0x1D15F, 'M', u'𝅘𝅥'),
+ (0x1D160, 'M', u'𝅘𝅥𝅮'),
+ (0x1D161, 'M', u'𝅘𝅥𝅯'),
+ (0x1D162, 'M', u'𝅘𝅥𝅰'),
+ (0x1D163, 'M', u'𝅘𝅥𝅱'),
+ (0x1D164, 'M', u'𝅘𝅥𝅲'),
(0x1D165, 'V'),
(0x1D173, 'X'),
(0x1D17B, 'V'),
- (0x1D1BB, 'M', '𝆹𝅥'),
- (0x1D1BC, 'M', '𝆺𝅥'),
- (0x1D1BD, 'M', '𝆹𝅥𝅮'),
- (0x1D1BE, 'M', '𝆺𝅥𝅮'),
- (0x1D1BF, 'M', '𝆹𝅥𝅯'),
- (0x1D1C0, 'M', '𝆺𝅥𝅯'),
+ (0x1D1BB, 'M', u'𝆹𝅥'),
+ (0x1D1BC, 'M', u'𝆺𝅥'),
+ (0x1D1BD, 'M', u'𝆹𝅥𝅮'),
+ (0x1D1BE, 'M', u'𝆺𝅥𝅮'),
+ (0x1D1BF, 'M', u'𝆹𝅥𝅯'),
+ (0x1D1C0, 'M', u'𝆺𝅥𝅯'),
(0x1D1C1, 'V'),
- (0x1D1EB, 'X'),
+ (0x1D1E9, 'X'),
(0x1D200, 'V'),
+ ]
+
+def _seg_58():
+ return [
(0x1D246, 'X'),
(0x1D2E0, 'V'),
(0x1D2F4, 'X'),
@@ -6301,1064 +6046,1062 @@ def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x1D357, 'X'),
(0x1D360, 'V'),
(0x1D379, 'X'),
- (0x1D400, 'M', 'a'),
- (0x1D401, 'M', 'b'),
- (0x1D402, 'M', 'c'),
- (0x1D403, 'M', 'd'),
- (0x1D404, 'M', 'e'),
- (0x1D405, 'M', 'f'),
- (0x1D406, 'M', 'g'),
- (0x1D407, 'M', 'h'),
- (0x1D408, 'M', 'i'),
- (0x1D409, 'M', 'j'),
- (0x1D40A, 'M', 'k'),
- (0x1D40B, 'M', 'l'),
- (0x1D40C, 'M', 'm'),
- (0x1D40D, 'M', 'n'),
- (0x1D40E, 'M', 'o'),
- (0x1D40F, 'M', 'p'),
- (0x1D410, 'M', 'q'),
- (0x1D411, 'M', 'r'),
- (0x1D412, 'M', 's'),
- (0x1D413, 'M', 't'),
- (0x1D414, 'M', 'u'),
- (0x1D415, 'M', 'v'),
- (0x1D416, 'M', 'w'),
- (0x1D417, 'M', 'x'),
- (0x1D418, 'M', 'y'),
- (0x1D419, 'M', 'z'),
- (0x1D41A, 'M', 'a'),
- (0x1D41B, 'M', 'b'),
- (0x1D41C, 'M', 'c'),
- (0x1D41D, 'M', 'd'),
- (0x1D41E, 'M', 'e'),
- (0x1D41F, 'M', 'f'),
- (0x1D420, 'M', 'g'),
- (0x1D421, 'M', 'h'),
- (0x1D422, 'M', 'i'),
- (0x1D423, 'M', 'j'),
- (0x1D424, 'M', 'k'),
- (0x1D425, 'M', 'l'),
- (0x1D426, 'M', 'm'),
- (0x1D427, 'M', 'n'),
- (0x1D428, 'M', 'o'),
- (0x1D429, 'M', 'p'),
- (0x1D42A, 'M', 'q'),
- (0x1D42B, 'M', 'r'),
- (0x1D42C, 'M', 's'),
- (0x1D42D, 'M', 't'),
- (0x1D42E, 'M', 'u'),
- (0x1D42F, 'M', 'v'),
- (0x1D430, 'M', 'w'),
- ]
-
-def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x1D431, 'M', 'x'),
- (0x1D432, 'M', 'y'),
- (0x1D433, 'M', 'z'),
- (0x1D434, 'M', 'a'),
- (0x1D435, 'M', 'b'),
- (0x1D436, 'M', 'c'),
- (0x1D437, 'M', 'd'),
- (0x1D438, 'M', 'e'),
- (0x1D439, 'M', 'f'),
- (0x1D43A, 'M', 'g'),
- (0x1D43B, 'M', 'h'),
- (0x1D43C, 'M', 'i'),
- (0x1D43D, 'M', 'j'),
- (0x1D43E, 'M', 'k'),
- (0x1D43F, 'M', 'l'),
- (0x1D440, 'M', 'm'),
- (0x1D441, 'M', 'n'),
- (0x1D442, 'M', 'o'),
- (0x1D443, 'M', 'p'),
- (0x1D444, 'M', 'q'),
- (0x1D445, 'M', 'r'),
- (0x1D446, 'M', 's'),
- (0x1D447, 'M', 't'),
- (0x1D448, 'M', 'u'),
- (0x1D449, 'M', 'v'),
- (0x1D44A, 'M', 'w'),
- (0x1D44B, 'M', 'x'),
- (0x1D44C, 'M', 'y'),
- (0x1D44D, 'M', 'z'),
- (0x1D44E, 'M', 'a'),
- (0x1D44F, 'M', 'b'),
- (0x1D450, 'M', 'c'),
- (0x1D451, 'M', 'd'),
- (0x1D452, 'M', 'e'),
- (0x1D453, 'M', 'f'),
- (0x1D454, 'M', 'g'),
+ (0x1D400, 'M', u'a'),
+ (0x1D401, 'M', u'b'),
+ (0x1D402, 'M', u'c'),
+ (0x1D403, 'M', u'd'),
+ (0x1D404, 'M', u'e'),
+ (0x1D405, 'M', u'f'),
+ (0x1D406, 'M', u'g'),
+ (0x1D407, 'M', u'h'),
+ (0x1D408, 'M', u'i'),
+ (0x1D409, 'M', u'j'),
+ (0x1D40A, 'M', u'k'),
+ (0x1D40B, 'M', u'l'),
+ (0x1D40C, 'M', u'm'),
+ (0x1D40D, 'M', u'n'),
+ (0x1D40E, 'M', u'o'),
+ (0x1D40F, 'M', u'p'),
+ (0x1D410, 'M', u'q'),
+ (0x1D411, 'M', u'r'),
+ (0x1D412, 'M', u's'),
+ (0x1D413, 'M', u't'),
+ (0x1D414, 'M', u'u'),
+ (0x1D415, 'M', u'v'),
+ (0x1D416, 'M', u'w'),
+ (0x1D417, 'M', u'x'),
+ (0x1D418, 'M', u'y'),
+ (0x1D419, 'M', u'z'),
+ (0x1D41A, 'M', u'a'),
+ (0x1D41B, 'M', u'b'),
+ (0x1D41C, 'M', u'c'),
+ (0x1D41D, 'M', u'd'),
+ (0x1D41E, 'M', u'e'),
+ (0x1D41F, 'M', u'f'),
+ (0x1D420, 'M', u'g'),
+ (0x1D421, 'M', u'h'),
+ (0x1D422, 'M', u'i'),
+ (0x1D423, 'M', u'j'),
+ (0x1D424, 'M', u'k'),
+ (0x1D425, 'M', u'l'),
+ (0x1D426, 'M', u'm'),
+ (0x1D427, 'M', u'n'),
+ (0x1D428, 'M', u'o'),
+ (0x1D429, 'M', u'p'),
+ (0x1D42A, 'M', u'q'),
+ (0x1D42B, 'M', u'r'),
+ (0x1D42C, 'M', u's'),
+ (0x1D42D, 'M', u't'),
+ (0x1D42E, 'M', u'u'),
+ (0x1D42F, 'M', u'v'),
+ (0x1D430, 'M', u'w'),
+ (0x1D431, 'M', u'x'),
+ (0x1D432, 'M', u'y'),
+ (0x1D433, 'M', u'z'),
+ (0x1D434, 'M', u'a'),
+ (0x1D435, 'M', u'b'),
+ (0x1D436, 'M', u'c'),
+ (0x1D437, 'M', u'd'),
+ (0x1D438, 'M', u'e'),
+ (0x1D439, 'M', u'f'),
+ (0x1D43A, 'M', u'g'),
+ (0x1D43B, 'M', u'h'),
+ (0x1D43C, 'M', u'i'),
+ (0x1D43D, 'M', u'j'),
+ (0x1D43E, 'M', u'k'),
+ (0x1D43F, 'M', u'l'),
+ (0x1D440, 'M', u'm'),
+ (0x1D441, 'M', u'n'),
+ (0x1D442, 'M', u'o'),
+ (0x1D443, 'M', u'p'),
+ (0x1D444, 'M', u'q'),
+ (0x1D445, 'M', u'r'),
+ (0x1D446, 'M', u's'),
+ (0x1D447, 'M', u't'),
+ (0x1D448, 'M', u'u'),
+ (0x1D449, 'M', u'v'),
+ (0x1D44A, 'M', u'w'),
+ (0x1D44B, 'M', u'x'),
+ (0x1D44C, 'M', u'y'),
+ (0x1D44D, 'M', u'z'),
+ (0x1D44E, 'M', u'a'),
+ (0x1D44F, 'M', u'b'),
+ (0x1D450, 'M', u'c'),
+ (0x1D451, 'M', u'd'),
+ (0x1D452, 'M', u'e'),
+ (0x1D453, 'M', u'f'),
+ (0x1D454, 'M', u'g'),
(0x1D455, 'X'),
- (0x1D456, 'M', 'i'),
- (0x1D457, 'M', 'j'),
- (0x1D458, 'M', 'k'),
- (0x1D459, 'M', 'l'),
- (0x1D45A, 'M', 'm'),
- (0x1D45B, 'M', 'n'),
- (0x1D45C, 'M', 'o'),
- (0x1D45D, 'M', 'p'),
- (0x1D45E, 'M', 'q'),
- (0x1D45F, 'M', 'r'),
- (0x1D460, 'M', 's'),
- (0x1D461, 'M', 't'),
- (0x1D462, 'M', 'u'),
- (0x1D463, 'M', 'v'),
- (0x1D464, 'M', 'w'),
- (0x1D465, 'M', 'x'),
- (0x1D466, 'M', 'y'),
- (0x1D467, 'M', 'z'),
- (0x1D468, 'M', 'a'),
- (0x1D469, 'M', 'b'),
- (0x1D46A, 'M', 'c'),
- (0x1D46B, 'M', 'd'),
- (0x1D46C, 'M', 'e'),
- (0x1D46D, 'M', 'f'),
- (0x1D46E, 'M', 'g'),
- (0x1D46F, 'M', 'h'),
- (0x1D470, 'M', 'i'),
- (0x1D471, 'M', 'j'),
- (0x1D472, 'M', 'k'),
- (0x1D473, 'M', 'l'),
- (0x1D474, 'M', 'm'),
- (0x1D475, 'M', 'n'),
- (0x1D476, 'M', 'o'),
- (0x1D477, 'M', 'p'),
- (0x1D478, 'M', 'q'),
- (0x1D479, 'M', 'r'),
- (0x1D47A, 'M', 's'),
- (0x1D47B, 'M', 't'),
- (0x1D47C, 'M', 'u'),
- (0x1D47D, 'M', 'v'),
- (0x1D47E, 'M', 'w'),
- (0x1D47F, 'M', 'x'),
- (0x1D480, 'M', 'y'),
- (0x1D481, 'M', 'z'),
- (0x1D482, 'M', 'a'),
- (0x1D483, 'M', 'b'),
- (0x1D484, 'M', 'c'),
- (0x1D485, 'M', 'd'),
- (0x1D486, 'M', 'e'),
- (0x1D487, 'M', 'f'),
- (0x1D488, 'M', 'g'),
- (0x1D489, 'M', 'h'),
- (0x1D48A, 'M', 'i'),
- (0x1D48B, 'M', 'j'),
- (0x1D48C, 'M', 'k'),
- (0x1D48D, 'M', 'l'),
- (0x1D48E, 'M', 'm'),
- (0x1D48F, 'M', 'n'),
- (0x1D490, 'M', 'o'),
- (0x1D491, 'M', 'p'),
- (0x1D492, 'M', 'q'),
- (0x1D493, 'M', 'r'),
- (0x1D494, 'M', 's'),
+ (0x1D456, 'M', u'i'),
+ (0x1D457, 'M', u'j'),
+ (0x1D458, 'M', u'k'),
+ (0x1D459, 'M', u'l'),
+ (0x1D45A, 'M', u'm'),
+ (0x1D45B, 'M', u'n'),
+ (0x1D45C, 'M', u'o'),
]
-def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_59():
return [
- (0x1D495, 'M', 't'),
- (0x1D496, 'M', 'u'),
- (0x1D497, 'M', 'v'),
- (0x1D498, 'M', 'w'),
- (0x1D499, 'M', 'x'),
- (0x1D49A, 'M', 'y'),
- (0x1D49B, 'M', 'z'),
- (0x1D49C, 'M', 'a'),
+ (0x1D45D, 'M', u'p'),
+ (0x1D45E, 'M', u'q'),
+ (0x1D45F, 'M', u'r'),
+ (0x1D460, 'M', u's'),
+ (0x1D461, 'M', u't'),
+ (0x1D462, 'M', u'u'),
+ (0x1D463, 'M', u'v'),
+ (0x1D464, 'M', u'w'),
+ (0x1D465, 'M', u'x'),
+ (0x1D466, 'M', u'y'),
+ (0x1D467, 'M', u'z'),
+ (0x1D468, 'M', u'a'),
+ (0x1D469, 'M', u'b'),
+ (0x1D46A, 'M', u'c'),
+ (0x1D46B, 'M', u'd'),
+ (0x1D46C, 'M', u'e'),
+ (0x1D46D, 'M', u'f'),
+ (0x1D46E, 'M', u'g'),
+ (0x1D46F, 'M', u'h'),
+ (0x1D470, 'M', u'i'),
+ (0x1D471, 'M', u'j'),
+ (0x1D472, 'M', u'k'),
+ (0x1D473, 'M', u'l'),
+ (0x1D474, 'M', u'm'),
+ (0x1D475, 'M', u'n'),
+ (0x1D476, 'M', u'o'),
+ (0x1D477, 'M', u'p'),
+ (0x1D478, 'M', u'q'),
+ (0x1D479, 'M', u'r'),
+ (0x1D47A, 'M', u's'),
+ (0x1D47B, 'M', u't'),
+ (0x1D47C, 'M', u'u'),
+ (0x1D47D, 'M', u'v'),
+ (0x1D47E, 'M', u'w'),
+ (0x1D47F, 'M', u'x'),
+ (0x1D480, 'M', u'y'),
+ (0x1D481, 'M', u'z'),
+ (0x1D482, 'M', u'a'),
+ (0x1D483, 'M', u'b'),
+ (0x1D484, 'M', u'c'),
+ (0x1D485, 'M', u'd'),
+ (0x1D486, 'M', u'e'),
+ (0x1D487, 'M', u'f'),
+ (0x1D488, 'M', u'g'),
+ (0x1D489, 'M', u'h'),
+ (0x1D48A, 'M', u'i'),
+ (0x1D48B, 'M', u'j'),
+ (0x1D48C, 'M', u'k'),
+ (0x1D48D, 'M', u'l'),
+ (0x1D48E, 'M', u'm'),
+ (0x1D48F, 'M', u'n'),
+ (0x1D490, 'M', u'o'),
+ (0x1D491, 'M', u'p'),
+ (0x1D492, 'M', u'q'),
+ (0x1D493, 'M', u'r'),
+ (0x1D494, 'M', u's'),
+ (0x1D495, 'M', u't'),
+ (0x1D496, 'M', u'u'),
+ (0x1D497, 'M', u'v'),
+ (0x1D498, 'M', u'w'),
+ (0x1D499, 'M', u'x'),
+ (0x1D49A, 'M', u'y'),
+ (0x1D49B, 'M', u'z'),
+ (0x1D49C, 'M', u'a'),
(0x1D49D, 'X'),
- (0x1D49E, 'M', 'c'),
- (0x1D49F, 'M', 'd'),
+ (0x1D49E, 'M', u'c'),
+ (0x1D49F, 'M', u'd'),
(0x1D4A0, 'X'),
- (0x1D4A2, 'M', 'g'),
+ (0x1D4A2, 'M', u'g'),
(0x1D4A3, 'X'),
- (0x1D4A5, 'M', 'j'),
- (0x1D4A6, 'M', 'k'),
+ (0x1D4A5, 'M', u'j'),
+ (0x1D4A6, 'M', u'k'),
(0x1D4A7, 'X'),
- (0x1D4A9, 'M', 'n'),
- (0x1D4AA, 'M', 'o'),
- (0x1D4AB, 'M', 'p'),
- (0x1D4AC, 'M', 'q'),
+ (0x1D4A9, 'M', u'n'),
+ (0x1D4AA, 'M', u'o'),
+ (0x1D4AB, 'M', u'p'),
+ (0x1D4AC, 'M', u'q'),
(0x1D4AD, 'X'),
- (0x1D4AE, 'M', 's'),
- (0x1D4AF, 'M', 't'),
- (0x1D4B0, 'M', 'u'),
- (0x1D4B1, 'M', 'v'),
- (0x1D4B2, 'M', 'w'),
- (0x1D4B3, 'M', 'x'),
- (0x1D4B4, 'M', 'y'),
- (0x1D4B5, 'M', 'z'),
- (0x1D4B6, 'M', 'a'),
- (0x1D4B7, 'M', 'b'),
- (0x1D4B8, 'M', 'c'),
- (0x1D4B9, 'M', 'd'),
+ (0x1D4AE, 'M', u's'),
+ (0x1D4AF, 'M', u't'),
+ (0x1D4B0, 'M', u'u'),
+ (0x1D4B1, 'M', u'v'),
+ (0x1D4B2, 'M', u'w'),
+ (0x1D4B3, 'M', u'x'),
+ (0x1D4B4, 'M', u'y'),
+ (0x1D4B5, 'M', u'z'),
+ (0x1D4B6, 'M', u'a'),
+ (0x1D4B7, 'M', u'b'),
+ (0x1D4B8, 'M', u'c'),
+ (0x1D4B9, 'M', u'd'),
(0x1D4BA, 'X'),
- (0x1D4BB, 'M', 'f'),
+ (0x1D4BB, 'M', u'f'),
(0x1D4BC, 'X'),
- (0x1D4BD, 'M', 'h'),
- (0x1D4BE, 'M', 'i'),
- (0x1D4BF, 'M', 'j'),
- (0x1D4C0, 'M', 'k'),
- (0x1D4C1, 'M', 'l'),
- (0x1D4C2, 'M', 'm'),
- (0x1D4C3, 'M', 'n'),
- (0x1D4C4, 'X'),
- (0x1D4C5, 'M', 'p'),
- (0x1D4C6, 'M', 'q'),
- (0x1D4C7, 'M', 'r'),
- (0x1D4C8, 'M', 's'),
- (0x1D4C9, 'M', 't'),
- (0x1D4CA, 'M', 'u'),
- (0x1D4CB, 'M', 'v'),
- (0x1D4CC, 'M', 'w'),
- (0x1D4CD, 'M', 'x'),
- (0x1D4CE, 'M', 'y'),
- (0x1D4CF, 'M', 'z'),
- (0x1D4D0, 'M', 'a'),
- (0x1D4D1, 'M', 'b'),
- (0x1D4D2, 'M', 'c'),
- (0x1D4D3, 'M', 'd'),
- (0x1D4D4, 'M', 'e'),
- (0x1D4D5, 'M', 'f'),
- (0x1D4D6, 'M', 'g'),
- (0x1D4D7, 'M', 'h'),
- (0x1D4D8, 'M', 'i'),
- (0x1D4D9, 'M', 'j'),
- (0x1D4DA, 'M', 'k'),
- (0x1D4DB, 'M', 'l'),
- (0x1D4DC, 'M', 'm'),
- (0x1D4DD, 'M', 'n'),
- (0x1D4DE, 'M', 'o'),
- (0x1D4DF, 'M', 'p'),
- (0x1D4E0, 'M', 'q'),
- (0x1D4E1, 'M', 'r'),
- (0x1D4E2, 'M', 's'),
- (0x1D4E3, 'M', 't'),
- (0x1D4E4, 'M', 'u'),
- (0x1D4E5, 'M', 'v'),
- (0x1D4E6, 'M', 'w'),
- (0x1D4E7, 'M', 'x'),
- (0x1D4E8, 'M', 'y'),
- (0x1D4E9, 'M', 'z'),
- (0x1D4EA, 'M', 'a'),
- (0x1D4EB, 'M', 'b'),
- (0x1D4EC, 'M', 'c'),
- (0x1D4ED, 'M', 'd'),
- (0x1D4EE, 'M', 'e'),
- (0x1D4EF, 'M', 'f'),
- (0x1D4F0, 'M', 'g'),
- (0x1D4F1, 'M', 'h'),
- (0x1D4F2, 'M', 'i'),
- (0x1D4F3, 'M', 'j'),
- (0x1D4F4, 'M', 'k'),
- (0x1D4F5, 'M', 'l'),
- (0x1D4F6, 'M', 'm'),
- (0x1D4F7, 'M', 'n'),
- (0x1D4F8, 'M', 'o'),
- (0x1D4F9, 'M', 'p'),
- (0x1D4FA, 'M', 'q'),
- (0x1D4FB, 'M', 'r'),
+ (0x1D4BD, 'M', u'h'),
+ (0x1D4BE, 'M', u'i'),
+ (0x1D4BF, 'M', u'j'),
+ (0x1D4C0, 'M', u'k'),
+ (0x1D4C1, 'M', u'l'),
+ (0x1D4C2, 'M', u'm'),
+ (0x1D4C3, 'M', u'n'),
]
-def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_60():
return [
- (0x1D4FC, 'M', 's'),
- (0x1D4FD, 'M', 't'),
- (0x1D4FE, 'M', 'u'),
- (0x1D4FF, 'M', 'v'),
- (0x1D500, 'M', 'w'),
- (0x1D501, 'M', 'x'),
- (0x1D502, 'M', 'y'),
- (0x1D503, 'M', 'z'),
- (0x1D504, 'M', 'a'),
- (0x1D505, 'M', 'b'),
+ (0x1D4C4, 'X'),
+ (0x1D4C5, 'M', u'p'),
+ (0x1D4C6, 'M', u'q'),
+ (0x1D4C7, 'M', u'r'),
+ (0x1D4C8, 'M', u's'),
+ (0x1D4C9, 'M', u't'),
+ (0x1D4CA, 'M', u'u'),
+ (0x1D4CB, 'M', u'v'),
+ (0x1D4CC, 'M', u'w'),
+ (0x1D4CD, 'M', u'x'),
+ (0x1D4CE, 'M', u'y'),
+ (0x1D4CF, 'M', u'z'),
+ (0x1D4D0, 'M', u'a'),
+ (0x1D4D1, 'M', u'b'),
+ (0x1D4D2, 'M', u'c'),
+ (0x1D4D3, 'M', u'd'),
+ (0x1D4D4, 'M', u'e'),
+ (0x1D4D5, 'M', u'f'),
+ (0x1D4D6, 'M', u'g'),
+ (0x1D4D7, 'M', u'h'),
+ (0x1D4D8, 'M', u'i'),
+ (0x1D4D9, 'M', u'j'),
+ (0x1D4DA, 'M', u'k'),
+ (0x1D4DB, 'M', u'l'),
+ (0x1D4DC, 'M', u'm'),
+ (0x1D4DD, 'M', u'n'),
+ (0x1D4DE, 'M', u'o'),
+ (0x1D4DF, 'M', u'p'),
+ (0x1D4E0, 'M', u'q'),
+ (0x1D4E1, 'M', u'r'),
+ (0x1D4E2, 'M', u's'),
+ (0x1D4E3, 'M', u't'),
+ (0x1D4E4, 'M', u'u'),
+ (0x1D4E5, 'M', u'v'),
+ (0x1D4E6, 'M', u'w'),
+ (0x1D4E7, 'M', u'x'),
+ (0x1D4E8, 'M', u'y'),
+ (0x1D4E9, 'M', u'z'),
+ (0x1D4EA, 'M', u'a'),
+ (0x1D4EB, 'M', u'b'),
+ (0x1D4EC, 'M', u'c'),
+ (0x1D4ED, 'M', u'd'),
+ (0x1D4EE, 'M', u'e'),
+ (0x1D4EF, 'M', u'f'),
+ (0x1D4F0, 'M', u'g'),
+ (0x1D4F1, 'M', u'h'),
+ (0x1D4F2, 'M', u'i'),
+ (0x1D4F3, 'M', u'j'),
+ (0x1D4F4, 'M', u'k'),
+ (0x1D4F5, 'M', u'l'),
+ (0x1D4F6, 'M', u'm'),
+ (0x1D4F7, 'M', u'n'),
+ (0x1D4F8, 'M', u'o'),
+ (0x1D4F9, 'M', u'p'),
+ (0x1D4FA, 'M', u'q'),
+ (0x1D4FB, 'M', u'r'),
+ (0x1D4FC, 'M', u's'),
+ (0x1D4FD, 'M', u't'),
+ (0x1D4FE, 'M', u'u'),
+ (0x1D4FF, 'M', u'v'),
+ (0x1D500, 'M', u'w'),
+ (0x1D501, 'M', u'x'),
+ (0x1D502, 'M', u'y'),
+ (0x1D503, 'M', u'z'),
+ (0x1D504, 'M', u'a'),
+ (0x1D505, 'M', u'b'),
(0x1D506, 'X'),
- (0x1D507, 'M', 'd'),
- (0x1D508, 'M', 'e'),
- (0x1D509, 'M', 'f'),
- (0x1D50A, 'M', 'g'),
+ (0x1D507, 'M', u'd'),
+ (0x1D508, 'M', u'e'),
+ (0x1D509, 'M', u'f'),
+ (0x1D50A, 'M', u'g'),
(0x1D50B, 'X'),
- (0x1D50D, 'M', 'j'),
- (0x1D50E, 'M', 'k'),
- (0x1D50F, 'M', 'l'),
- (0x1D510, 'M', 'm'),
- (0x1D511, 'M', 'n'),
- (0x1D512, 'M', 'o'),
- (0x1D513, 'M', 'p'),
- (0x1D514, 'M', 'q'),
+ (0x1D50D, 'M', u'j'),
+ (0x1D50E, 'M', u'k'),
+ (0x1D50F, 'M', u'l'),
+ (0x1D510, 'M', u'm'),
+ (0x1D511, 'M', u'n'),
+ (0x1D512, 'M', u'o'),
+ (0x1D513, 'M', u'p'),
+ (0x1D514, 'M', u'q'),
(0x1D515, 'X'),
- (0x1D516, 'M', 's'),
- (0x1D517, 'M', 't'),
- (0x1D518, 'M', 'u'),
- (0x1D519, 'M', 'v'),
- (0x1D51A, 'M', 'w'),
- (0x1D51B, 'M', 'x'),
- (0x1D51C, 'M', 'y'),
+ (0x1D516, 'M', u's'),
+ (0x1D517, 'M', u't'),
+ (0x1D518, 'M', u'u'),
+ (0x1D519, 'M', u'v'),
+ (0x1D51A, 'M', u'w'),
+ (0x1D51B, 'M', u'x'),
+ (0x1D51C, 'M', u'y'),
(0x1D51D, 'X'),
- (0x1D51E, 'M', 'a'),
- (0x1D51F, 'M', 'b'),
- (0x1D520, 'M', 'c'),
- (0x1D521, 'M', 'd'),
- (0x1D522, 'M', 'e'),
- (0x1D523, 'M', 'f'),
- (0x1D524, 'M', 'g'),
- (0x1D525, 'M', 'h'),
- (0x1D526, 'M', 'i'),
- (0x1D527, 'M', 'j'),
- (0x1D528, 'M', 'k'),
- (0x1D529, 'M', 'l'),
- (0x1D52A, 'M', 'm'),
- (0x1D52B, 'M', 'n'),
- (0x1D52C, 'M', 'o'),
- (0x1D52D, 'M', 'p'),
- (0x1D52E, 'M', 'q'),
- (0x1D52F, 'M', 'r'),
- (0x1D530, 'M', 's'),
- (0x1D531, 'M', 't'),
- (0x1D532, 'M', 'u'),
- (0x1D533, 'M', 'v'),
- (0x1D534, 'M', 'w'),
- (0x1D535, 'M', 'x'),
- (0x1D536, 'M', 'y'),
- (0x1D537, 'M', 'z'),
- (0x1D538, 'M', 'a'),
- (0x1D539, 'M', 'b'),
+ (0x1D51E, 'M', u'a'),
+ (0x1D51F, 'M', u'b'),
+ (0x1D520, 'M', u'c'),
+ (0x1D521, 'M', u'd'),
+ (0x1D522, 'M', u'e'),
+ (0x1D523, 'M', u'f'),
+ (0x1D524, 'M', u'g'),
+ (0x1D525, 'M', u'h'),
+ (0x1D526, 'M', u'i'),
+ (0x1D527, 'M', u'j'),
+ (0x1D528, 'M', u'k'),
+ ]
+
+def _seg_61():
+ return [
+ (0x1D529, 'M', u'l'),
+ (0x1D52A, 'M', u'm'),
+ (0x1D52B, 'M', u'n'),
+ (0x1D52C, 'M', u'o'),
+ (0x1D52D, 'M', u'p'),
+ (0x1D52E, 'M', u'q'),
+ (0x1D52F, 'M', u'r'),
+ (0x1D530, 'M', u's'),
+ (0x1D531, 'M', u't'),
+ (0x1D532, 'M', u'u'),
+ (0x1D533, 'M', u'v'),
+ (0x1D534, 'M', u'w'),
+ (0x1D535, 'M', u'x'),
+ (0x1D536, 'M', u'y'),
+ (0x1D537, 'M', u'z'),
+ (0x1D538, 'M', u'a'),
+ (0x1D539, 'M', u'b'),
(0x1D53A, 'X'),
- (0x1D53B, 'M', 'd'),
- (0x1D53C, 'M', 'e'),
- (0x1D53D, 'M', 'f'),
- (0x1D53E, 'M', 'g'),
+ (0x1D53B, 'M', u'd'),
+ (0x1D53C, 'M', u'e'),
+ (0x1D53D, 'M', u'f'),
+ (0x1D53E, 'M', u'g'),
(0x1D53F, 'X'),
- (0x1D540, 'M', 'i'),
- (0x1D541, 'M', 'j'),
- (0x1D542, 'M', 'k'),
- (0x1D543, 'M', 'l'),
- (0x1D544, 'M', 'm'),
+ (0x1D540, 'M', u'i'),
+ (0x1D541, 'M', u'j'),
+ (0x1D542, 'M', u'k'),
+ (0x1D543, 'M', u'l'),
+ (0x1D544, 'M', u'm'),
(0x1D545, 'X'),
- (0x1D546, 'M', 'o'),
+ (0x1D546, 'M', u'o'),
(0x1D547, 'X'),
- (0x1D54A, 'M', 's'),
- (0x1D54B, 'M', 't'),
- (0x1D54C, 'M', 'u'),
- (0x1D54D, 'M', 'v'),
- (0x1D54E, 'M', 'w'),
- (0x1D54F, 'M', 'x'),
- (0x1D550, 'M', 'y'),
+ (0x1D54A, 'M', u's'),
+ (0x1D54B, 'M', u't'),
+ (0x1D54C, 'M', u'u'),
+ (0x1D54D, 'M', u'v'),
+ (0x1D54E, 'M', u'w'),
+ (0x1D54F, 'M', u'x'),
+ (0x1D550, 'M', u'y'),
(0x1D551, 'X'),
- (0x1D552, 'M', 'a'),
- (0x1D553, 'M', 'b'),
- (0x1D554, 'M', 'c'),
- (0x1D555, 'M', 'd'),
- (0x1D556, 'M', 'e'),
- (0x1D557, 'M', 'f'),
- (0x1D558, 'M', 'g'),
- (0x1D559, 'M', 'h'),
- (0x1D55A, 'M', 'i'),
- (0x1D55B, 'M', 'j'),
- (0x1D55C, 'M', 'k'),
- (0x1D55D, 'M', 'l'),
- (0x1D55E, 'M', 'm'),
- (0x1D55F, 'M', 'n'),
- (0x1D560, 'M', 'o'),
- (0x1D561, 'M', 'p'),
- (0x1D562, 'M', 'q'),
+ (0x1D552, 'M', u'a'),
+ (0x1D553, 'M', u'b'),
+ (0x1D554, 'M', u'c'),
+ (0x1D555, 'M', u'd'),
+ (0x1D556, 'M', u'e'),
+ (0x1D557, 'M', u'f'),
+ (0x1D558, 'M', u'g'),
+ (0x1D559, 'M', u'h'),
+ (0x1D55A, 'M', u'i'),
+ (0x1D55B, 'M', u'j'),
+ (0x1D55C, 'M', u'k'),
+ (0x1D55D, 'M', u'l'),
+ (0x1D55E, 'M', u'm'),
+ (0x1D55F, 'M', u'n'),
+ (0x1D560, 'M', u'o'),
+ (0x1D561, 'M', u'p'),
+ (0x1D562, 'M', u'q'),
+ (0x1D563, 'M', u'r'),
+ (0x1D564, 'M', u's'),
+ (0x1D565, 'M', u't'),
+ (0x1D566, 'M', u'u'),
+ (0x1D567, 'M', u'v'),
+ (0x1D568, 'M', u'w'),
+ (0x1D569, 'M', u'x'),
+ (0x1D56A, 'M', u'y'),
+ (0x1D56B, 'M', u'z'),
+ (0x1D56C, 'M', u'a'),
+ (0x1D56D, 'M', u'b'),
+ (0x1D56E, 'M', u'c'),
+ (0x1D56F, 'M', u'd'),
+ (0x1D570, 'M', u'e'),
+ (0x1D571, 'M', u'f'),
+ (0x1D572, 'M', u'g'),
+ (0x1D573, 'M', u'h'),
+ (0x1D574, 'M', u'i'),
+ (0x1D575, 'M', u'j'),
+ (0x1D576, 'M', u'k'),
+ (0x1D577, 'M', u'l'),
+ (0x1D578, 'M', u'm'),
+ (0x1D579, 'M', u'n'),
+ (0x1D57A, 'M', u'o'),
+ (0x1D57B, 'M', u'p'),
+ (0x1D57C, 'M', u'q'),
+ (0x1D57D, 'M', u'r'),
+ (0x1D57E, 'M', u's'),
+ (0x1D57F, 'M', u't'),
+ (0x1D580, 'M', u'u'),
+ (0x1D581, 'M', u'v'),
+ (0x1D582, 'M', u'w'),
+ (0x1D583, 'M', u'x'),
+ (0x1D584, 'M', u'y'),
+ (0x1D585, 'M', u'z'),
+ (0x1D586, 'M', u'a'),
+ (0x1D587, 'M', u'b'),
+ (0x1D588, 'M', u'c'),
+ (0x1D589, 'M', u'd'),
+ (0x1D58A, 'M', u'e'),
+ (0x1D58B, 'M', u'f'),
+ (0x1D58C, 'M', u'g'),
+ (0x1D58D, 'M', u'h'),
+ (0x1D58E, 'M', u'i'),
]
-def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_62():
return [
- (0x1D563, 'M', 'r'),
- (0x1D564, 'M', 's'),
- (0x1D565, 'M', 't'),
- (0x1D566, 'M', 'u'),
- (0x1D567, 'M', 'v'),
- (0x1D568, 'M', 'w'),
- (0x1D569, 'M', 'x'),
- (0x1D56A, 'M', 'y'),
- (0x1D56B, 'M', 'z'),
- (0x1D56C, 'M', 'a'),
- (0x1D56D, 'M', 'b'),
- (0x1D56E, 'M', 'c'),
- (0x1D56F, 'M', 'd'),
- (0x1D570, 'M', 'e'),
- (0x1D571, 'M', 'f'),
- (0x1D572, 'M', 'g'),
- (0x1D573, 'M', 'h'),
- (0x1D574, 'M', 'i'),
- (0x1D575, 'M', 'j'),
- (0x1D576, 'M', 'k'),
- (0x1D577, 'M', 'l'),
- (0x1D578, 'M', 'm'),
- (0x1D579, 'M', 'n'),
- (0x1D57A, 'M', 'o'),
- (0x1D57B, 'M', 'p'),
- (0x1D57C, 'M', 'q'),
- (0x1D57D, 'M', 'r'),
- (0x1D57E, 'M', 's'),
- (0x1D57F, 'M', 't'),
- (0x1D580, 'M', 'u'),
- (0x1D581, 'M', 'v'),
- (0x1D582, 'M', 'w'),
- (0x1D583, 'M', 'x'),
- (0x1D584, 'M', 'y'),
- (0x1D585, 'M', 'z'),
- (0x1D586, 'M', 'a'),
- (0x1D587, 'M', 'b'),
- (0x1D588, 'M', 'c'),
- (0x1D589, 'M', 'd'),
- (0x1D58A, 'M', 'e'),
- (0x1D58B, 'M', 'f'),
- (0x1D58C, 'M', 'g'),
- (0x1D58D, 'M', 'h'),
- (0x1D58E, 'M', 'i'),
- (0x1D58F, 'M', 'j'),
- (0x1D590, 'M', 'k'),
- (0x1D591, 'M', 'l'),
- (0x1D592, 'M', 'm'),
- (0x1D593, 'M', 'n'),
- (0x1D594, 'M', 'o'),
- (0x1D595, 'M', 'p'),
- (0x1D596, 'M', 'q'),
- (0x1D597, 'M', 'r'),
- (0x1D598, 'M', 's'),
- (0x1D599, 'M', 't'),
- (0x1D59A, 'M', 'u'),
- (0x1D59B, 'M', 'v'),
- (0x1D59C, 'M', 'w'),
- (0x1D59D, 'M', 'x'),
- (0x1D59E, 'M', 'y'),
- (0x1D59F, 'M', 'z'),
- (0x1D5A0, 'M', 'a'),
- (0x1D5A1, 'M', 'b'),
- (0x1D5A2, 'M', 'c'),
- (0x1D5A3, 'M', 'd'),
- (0x1D5A4, 'M', 'e'),
- (0x1D5A5, 'M', 'f'),
- (0x1D5A6, 'M', 'g'),
- (0x1D5A7, 'M', 'h'),
- (0x1D5A8, 'M', 'i'),
- (0x1D5A9, 'M', 'j'),
- (0x1D5AA, 'M', 'k'),
- (0x1D5AB, 'M', 'l'),
- (0x1D5AC, 'M', 'm'),
- (0x1D5AD, 'M', 'n'),
- (0x1D5AE, 'M', 'o'),
- (0x1D5AF, 'M', 'p'),
- (0x1D5B0, 'M', 'q'),
- (0x1D5B1, 'M', 'r'),
- (0x1D5B2, 'M', 's'),
- (0x1D5B3, 'M', 't'),
- (0x1D5B4, 'M', 'u'),
- (0x1D5B5, 'M', 'v'),
- (0x1D5B6, 'M', 'w'),
- (0x1D5B7, 'M', 'x'),
- (0x1D5B8, 'M', 'y'),
- (0x1D5B9, 'M', 'z'),
- (0x1D5BA, 'M', 'a'),
- (0x1D5BB, 'M', 'b'),
- (0x1D5BC, 'M', 'c'),
- (0x1D5BD, 'M', 'd'),
- (0x1D5BE, 'M', 'e'),
- (0x1D5BF, 'M', 'f'),
- (0x1D5C0, 'M', 'g'),
- (0x1D5C1, 'M', 'h'),
- (0x1D5C2, 'M', 'i'),
- (0x1D5C3, 'M', 'j'),
- (0x1D5C4, 'M', 'k'),
- (0x1D5C5, 'M', 'l'),
- (0x1D5C6, 'M', 'm'),
+ (0x1D58F, 'M', u'j'),
+ (0x1D590, 'M', u'k'),
+ (0x1D591, 'M', u'l'),
+ (0x1D592, 'M', u'm'),
+ (0x1D593, 'M', u'n'),
+ (0x1D594, 'M', u'o'),
+ (0x1D595, 'M', u'p'),
+ (0x1D596, 'M', u'q'),
+ (0x1D597, 'M', u'r'),
+ (0x1D598, 'M', u's'),
+ (0x1D599, 'M', u't'),
+ (0x1D59A, 'M', u'u'),
+ (0x1D59B, 'M', u'v'),
+ (0x1D59C, 'M', u'w'),
+ (0x1D59D, 'M', u'x'),
+ (0x1D59E, 'M', u'y'),
+ (0x1D59F, 'M', u'z'),
+ (0x1D5A0, 'M', u'a'),
+ (0x1D5A1, 'M', u'b'),
+ (0x1D5A2, 'M', u'c'),
+ (0x1D5A3, 'M', u'd'),
+ (0x1D5A4, 'M', u'e'),
+ (0x1D5A5, 'M', u'f'),
+ (0x1D5A6, 'M', u'g'),
+ (0x1D5A7, 'M', u'h'),
+ (0x1D5A8, 'M', u'i'),
+ (0x1D5A9, 'M', u'j'),
+ (0x1D5AA, 'M', u'k'),
+ (0x1D5AB, 'M', u'l'),
+ (0x1D5AC, 'M', u'm'),
+ (0x1D5AD, 'M', u'n'),
+ (0x1D5AE, 'M', u'o'),
+ (0x1D5AF, 'M', u'p'),
+ (0x1D5B0, 'M', u'q'),
+ (0x1D5B1, 'M', u'r'),
+ (0x1D5B2, 'M', u's'),
+ (0x1D5B3, 'M', u't'),
+ (0x1D5B4, 'M', u'u'),
+ (0x1D5B5, 'M', u'v'),
+ (0x1D5B6, 'M', u'w'),
+ (0x1D5B7, 'M', u'x'),
+ (0x1D5B8, 'M', u'y'),
+ (0x1D5B9, 'M', u'z'),
+ (0x1D5BA, 'M', u'a'),
+ (0x1D5BB, 'M', u'b'),
+ (0x1D5BC, 'M', u'c'),
+ (0x1D5BD, 'M', u'd'),
+ (0x1D5BE, 'M', u'e'),
+ (0x1D5BF, 'M', u'f'),
+ (0x1D5C0, 'M', u'g'),
+ (0x1D5C1, 'M', u'h'),
+ (0x1D5C2, 'M', u'i'),
+ (0x1D5C3, 'M', u'j'),
+ (0x1D5C4, 'M', u'k'),
+ (0x1D5C5, 'M', u'l'),
+ (0x1D5C6, 'M', u'm'),
+ (0x1D5C7, 'M', u'n'),
+ (0x1D5C8, 'M', u'o'),
+ (0x1D5C9, 'M', u'p'),
+ (0x1D5CA, 'M', u'q'),
+ (0x1D5CB, 'M', u'r'),
+ (0x1D5CC, 'M', u's'),
+ (0x1D5CD, 'M', u't'),
+ (0x1D5CE, 'M', u'u'),
+ (0x1D5CF, 'M', u'v'),
+ (0x1D5D0, 'M', u'w'),
+ (0x1D5D1, 'M', u'x'),
+ (0x1D5D2, 'M', u'y'),
+ (0x1D5D3, 'M', u'z'),
+ (0x1D5D4, 'M', u'a'),
+ (0x1D5D5, 'M', u'b'),
+ (0x1D5D6, 'M', u'c'),
+ (0x1D5D7, 'M', u'd'),
+ (0x1D5D8, 'M', u'e'),
+ (0x1D5D9, 'M', u'f'),
+ (0x1D5DA, 'M', u'g'),
+ (0x1D5DB, 'M', u'h'),
+ (0x1D5DC, 'M', u'i'),
+ (0x1D5DD, 'M', u'j'),
+ (0x1D5DE, 'M', u'k'),
+ (0x1D5DF, 'M', u'l'),
+ (0x1D5E0, 'M', u'm'),
+ (0x1D5E1, 'M', u'n'),
+ (0x1D5E2, 'M', u'o'),
+ (0x1D5E3, 'M', u'p'),
+ (0x1D5E4, 'M', u'q'),
+ (0x1D5E5, 'M', u'r'),
+ (0x1D5E6, 'M', u's'),
+ (0x1D5E7, 'M', u't'),
+ (0x1D5E8, 'M', u'u'),
+ (0x1D5E9, 'M', u'v'),
+ (0x1D5EA, 'M', u'w'),
+ (0x1D5EB, 'M', u'x'),
+ (0x1D5EC, 'M', u'y'),
+ (0x1D5ED, 'M', u'z'),
+ (0x1D5EE, 'M', u'a'),
+ (0x1D5EF, 'M', u'b'),
+ (0x1D5F0, 'M', u'c'),
+ (0x1D5F1, 'M', u'd'),
+ (0x1D5F2, 'M', u'e'),
]
-def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_63():
return [
- (0x1D5C7, 'M', 'n'),
- (0x1D5C8, 'M', 'o'),
- (0x1D5C9, 'M', 'p'),
- (0x1D5CA, 'M', 'q'),
- (0x1D5CB, 'M', 'r'),
- (0x1D5CC, 'M', 's'),
- (0x1D5CD, 'M', 't'),
- (0x1D5CE, 'M', 'u'),
- (0x1D5CF, 'M', 'v'),
- (0x1D5D0, 'M', 'w'),
- (0x1D5D1, 'M', 'x'),
- (0x1D5D2, 'M', 'y'),
- (0x1D5D3, 'M', 'z'),
- (0x1D5D4, 'M', 'a'),
- (0x1D5D5, 'M', 'b'),
- (0x1D5D6, 'M', 'c'),
- (0x1D5D7, 'M', 'd'),
- (0x1D5D8, 'M', 'e'),
- (0x1D5D9, 'M', 'f'),
- (0x1D5DA, 'M', 'g'),
- (0x1D5DB, 'M', 'h'),
- (0x1D5DC, 'M', 'i'),
- (0x1D5DD, 'M', 'j'),
- (0x1D5DE, 'M', 'k'),
- (0x1D5DF, 'M', 'l'),
- (0x1D5E0, 'M', 'm'),
- (0x1D5E1, 'M', 'n'),
- (0x1D5E2, 'M', 'o'),
- (0x1D5E3, 'M', 'p'),
- (0x1D5E4, 'M', 'q'),
- (0x1D5E5, 'M', 'r'),
- (0x1D5E6, 'M', 's'),
- (0x1D5E7, 'M', 't'),
- (0x1D5E8, 'M', 'u'),
- (0x1D5E9, 'M', 'v'),
- (0x1D5EA, 'M', 'w'),
- (0x1D5EB, 'M', 'x'),
- (0x1D5EC, 'M', 'y'),
- (0x1D5ED, 'M', 'z'),
- (0x1D5EE, 'M', 'a'),
- (0x1D5EF, 'M', 'b'),
- (0x1D5F0, 'M', 'c'),
- (0x1D5F1, 'M', 'd'),
- (0x1D5F2, 'M', 'e'),
- (0x1D5F3, 'M', 'f'),
- (0x1D5F4, 'M', 'g'),
- (0x1D5F5, 'M', 'h'),
- (0x1D5F6, 'M', 'i'),
- (0x1D5F7, 'M', 'j'),
- (0x1D5F8, 'M', 'k'),
- (0x1D5F9, 'M', 'l'),
- (0x1D5FA, 'M', 'm'),
- (0x1D5FB, 'M', 'n'),
- (0x1D5FC, 'M', 'o'),
- (0x1D5FD, 'M', 'p'),
- (0x1D5FE, 'M', 'q'),
- (0x1D5FF, 'M', 'r'),
- (0x1D600, 'M', 's'),
- (0x1D601, 'M', 't'),
- (0x1D602, 'M', 'u'),
- (0x1D603, 'M', 'v'),
- (0x1D604, 'M', 'w'),
- (0x1D605, 'M', 'x'),
- (0x1D606, 'M', 'y'),
- (0x1D607, 'M', 'z'),
- (0x1D608, 'M', 'a'),
- (0x1D609, 'M', 'b'),
- (0x1D60A, 'M', 'c'),
- (0x1D60B, 'M', 'd'),
- (0x1D60C, 'M', 'e'),
- (0x1D60D, 'M', 'f'),
- (0x1D60E, 'M', 'g'),
- (0x1D60F, 'M', 'h'),
- (0x1D610, 'M', 'i'),
- (0x1D611, 'M', 'j'),
- (0x1D612, 'M', 'k'),
- (0x1D613, 'M', 'l'),
- (0x1D614, 'M', 'm'),
- (0x1D615, 'M', 'n'),
- (0x1D616, 'M', 'o'),
- (0x1D617, 'M', 'p'),
- (0x1D618, 'M', 'q'),
- (0x1D619, 'M', 'r'),
- (0x1D61A, 'M', 's'),
- (0x1D61B, 'M', 't'),
- (0x1D61C, 'M', 'u'),
- (0x1D61D, 'M', 'v'),
- (0x1D61E, 'M', 'w'),
- (0x1D61F, 'M', 'x'),
- (0x1D620, 'M', 'y'),
- (0x1D621, 'M', 'z'),
- (0x1D622, 'M', 'a'),
- (0x1D623, 'M', 'b'),
- (0x1D624, 'M', 'c'),
- (0x1D625, 'M', 'd'),
- (0x1D626, 'M', 'e'),
- (0x1D627, 'M', 'f'),
- (0x1D628, 'M', 'g'),
- (0x1D629, 'M', 'h'),
- (0x1D62A, 'M', 'i'),
+ (0x1D5F3, 'M', u'f'),
+ (0x1D5F4, 'M', u'g'),
+ (0x1D5F5, 'M', u'h'),
+ (0x1D5F6, 'M', u'i'),
+ (0x1D5F7, 'M', u'j'),
+ (0x1D5F8, 'M', u'k'),
+ (0x1D5F9, 'M', u'l'),
+ (0x1D5FA, 'M', u'm'),
+ (0x1D5FB, 'M', u'n'),
+ (0x1D5FC, 'M', u'o'),
+ (0x1D5FD, 'M', u'p'),
+ (0x1D5FE, 'M', u'q'),
+ (0x1D5FF, 'M', u'r'),
+ (0x1D600, 'M', u's'),
+ (0x1D601, 'M', u't'),
+ (0x1D602, 'M', u'u'),
+ (0x1D603, 'M', u'v'),
+ (0x1D604, 'M', u'w'),
+ (0x1D605, 'M', u'x'),
+ (0x1D606, 'M', u'y'),
+ (0x1D607, 'M', u'z'),
+ (0x1D608, 'M', u'a'),
+ (0x1D609, 'M', u'b'),
+ (0x1D60A, 'M', u'c'),
+ (0x1D60B, 'M', u'd'),
+ (0x1D60C, 'M', u'e'),
+ (0x1D60D, 'M', u'f'),
+ (0x1D60E, 'M', u'g'),
+ (0x1D60F, 'M', u'h'),
+ (0x1D610, 'M', u'i'),
+ (0x1D611, 'M', u'j'),
+ (0x1D612, 'M', u'k'),
+ (0x1D613, 'M', u'l'),
+ (0x1D614, 'M', u'm'),
+ (0x1D615, 'M', u'n'),
+ (0x1D616, 'M', u'o'),
+ (0x1D617, 'M', u'p'),
+ (0x1D618, 'M', u'q'),
+ (0x1D619, 'M', u'r'),
+ (0x1D61A, 'M', u's'),
+ (0x1D61B, 'M', u't'),
+ (0x1D61C, 'M', u'u'),
+ (0x1D61D, 'M', u'v'),
+ (0x1D61E, 'M', u'w'),
+ (0x1D61F, 'M', u'x'),
+ (0x1D620, 'M', u'y'),
+ (0x1D621, 'M', u'z'),
+ (0x1D622, 'M', u'a'),
+ (0x1D623, 'M', u'b'),
+ (0x1D624, 'M', u'c'),
+ (0x1D625, 'M', u'd'),
+ (0x1D626, 'M', u'e'),
+ (0x1D627, 'M', u'f'),
+ (0x1D628, 'M', u'g'),
+ (0x1D629, 'M', u'h'),
+ (0x1D62A, 'M', u'i'),
+ (0x1D62B, 'M', u'j'),
+ (0x1D62C, 'M', u'k'),
+ (0x1D62D, 'M', u'l'),
+ (0x1D62E, 'M', u'm'),
+ (0x1D62F, 'M', u'n'),
+ (0x1D630, 'M', u'o'),
+ (0x1D631, 'M', u'p'),
+ (0x1D632, 'M', u'q'),
+ (0x1D633, 'M', u'r'),
+ (0x1D634, 'M', u's'),
+ (0x1D635, 'M', u't'),
+ (0x1D636, 'M', u'u'),
+ (0x1D637, 'M', u'v'),
+ (0x1D638, 'M', u'w'),
+ (0x1D639, 'M', u'x'),
+ (0x1D63A, 'M', u'y'),
+ (0x1D63B, 'M', u'z'),
+ (0x1D63C, 'M', u'a'),
+ (0x1D63D, 'M', u'b'),
+ (0x1D63E, 'M', u'c'),
+ (0x1D63F, 'M', u'd'),
+ (0x1D640, 'M', u'e'),
+ (0x1D641, 'M', u'f'),
+ (0x1D642, 'M', u'g'),
+ (0x1D643, 'M', u'h'),
+ (0x1D644, 'M', u'i'),
+ (0x1D645, 'M', u'j'),
+ (0x1D646, 'M', u'k'),
+ (0x1D647, 'M', u'l'),
+ (0x1D648, 'M', u'm'),
+ (0x1D649, 'M', u'n'),
+ (0x1D64A, 'M', u'o'),
+ (0x1D64B, 'M', u'p'),
+ (0x1D64C, 'M', u'q'),
+ (0x1D64D, 'M', u'r'),
+ (0x1D64E, 'M', u's'),
+ (0x1D64F, 'M', u't'),
+ (0x1D650, 'M', u'u'),
+ (0x1D651, 'M', u'v'),
+ (0x1D652, 'M', u'w'),
+ (0x1D653, 'M', u'x'),
+ (0x1D654, 'M', u'y'),
+ (0x1D655, 'M', u'z'),
+ (0x1D656, 'M', u'a'),
]
-def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_64():
return [
- (0x1D62B, 'M', 'j'),
- (0x1D62C, 'M', 'k'),
- (0x1D62D, 'M', 'l'),
- (0x1D62E, 'M', 'm'),
- (0x1D62F, 'M', 'n'),
- (0x1D630, 'M', 'o'),
- (0x1D631, 'M', 'p'),
- (0x1D632, 'M', 'q'),
- (0x1D633, 'M', 'r'),
- (0x1D634, 'M', 's'),
- (0x1D635, 'M', 't'),
- (0x1D636, 'M', 'u'),
- (0x1D637, 'M', 'v'),
- (0x1D638, 'M', 'w'),
- (0x1D639, 'M', 'x'),
- (0x1D63A, 'M', 'y'),
- (0x1D63B, 'M', 'z'),
- (0x1D63C, 'M', 'a'),
- (0x1D63D, 'M', 'b'),
- (0x1D63E, 'M', 'c'),
- (0x1D63F, 'M', 'd'),
- (0x1D640, 'M', 'e'),
- (0x1D641, 'M', 'f'),
- (0x1D642, 'M', 'g'),
- (0x1D643, 'M', 'h'),
- (0x1D644, 'M', 'i'),
- (0x1D645, 'M', 'j'),
- (0x1D646, 'M', 'k'),
- (0x1D647, 'M', 'l'),
- (0x1D648, 'M', 'm'),
- (0x1D649, 'M', 'n'),
- (0x1D64A, 'M', 'o'),
- (0x1D64B, 'M', 'p'),
- (0x1D64C, 'M', 'q'),
- (0x1D64D, 'M', 'r'),
- (0x1D64E, 'M', 's'),
- (0x1D64F, 'M', 't'),
- (0x1D650, 'M', 'u'),
- (0x1D651, 'M', 'v'),
- (0x1D652, 'M', 'w'),
- (0x1D653, 'M', 'x'),
- (0x1D654, 'M', 'y'),
- (0x1D655, 'M', 'z'),
- (0x1D656, 'M', 'a'),
- (0x1D657, 'M', 'b'),
- (0x1D658, 'M', 'c'),
- (0x1D659, 'M', 'd'),
- (0x1D65A, 'M', 'e'),
- (0x1D65B, 'M', 'f'),
- (0x1D65C, 'M', 'g'),
- (0x1D65D, 'M', 'h'),
- (0x1D65E, 'M', 'i'),
- (0x1D65F, 'M', 'j'),
- (0x1D660, 'M', 'k'),
- (0x1D661, 'M', 'l'),
- (0x1D662, 'M', 'm'),
- (0x1D663, 'M', 'n'),
- (0x1D664, 'M', 'o'),
- (0x1D665, 'M', 'p'),
- (0x1D666, 'M', 'q'),
- (0x1D667, 'M', 'r'),
- (0x1D668, 'M', 's'),
- (0x1D669, 'M', 't'),
- (0x1D66A, 'M', 'u'),
- (0x1D66B, 'M', 'v'),
- (0x1D66C, 'M', 'w'),
- (0x1D66D, 'M', 'x'),
- (0x1D66E, 'M', 'y'),
- (0x1D66F, 'M', 'z'),
- (0x1D670, 'M', 'a'),
- (0x1D671, 'M', 'b'),
- (0x1D672, 'M', 'c'),
- (0x1D673, 'M', 'd'),
- (0x1D674, 'M', 'e'),
- (0x1D675, 'M', 'f'),
- (0x1D676, 'M', 'g'),
- (0x1D677, 'M', 'h'),
- (0x1D678, 'M', 'i'),
- (0x1D679, 'M', 'j'),
- (0x1D67A, 'M', 'k'),
- (0x1D67B, 'M', 'l'),
- (0x1D67C, 'M', 'm'),
- (0x1D67D, 'M', 'n'),
- (0x1D67E, 'M', 'o'),
- (0x1D67F, 'M', 'p'),
- (0x1D680, 'M', 'q'),
- (0x1D681, 'M', 'r'),
- (0x1D682, 'M', 's'),
- (0x1D683, 'M', 't'),
- (0x1D684, 'M', 'u'),
- (0x1D685, 'M', 'v'),
- (0x1D686, 'M', 'w'),
- (0x1D687, 'M', 'x'),
- (0x1D688, 'M', 'y'),
- (0x1D689, 'M', 'z'),
- (0x1D68A, 'M', 'a'),
- (0x1D68B, 'M', 'b'),
- (0x1D68C, 'M', 'c'),
- (0x1D68D, 'M', 'd'),
- (0x1D68E, 'M', 'e'),
+ (0x1D657, 'M', u'b'),
+ (0x1D658, 'M', u'c'),
+ (0x1D659, 'M', u'd'),
+ (0x1D65A, 'M', u'e'),
+ (0x1D65B, 'M', u'f'),
+ (0x1D65C, 'M', u'g'),
+ (0x1D65D, 'M', u'h'),
+ (0x1D65E, 'M', u'i'),
+ (0x1D65F, 'M', u'j'),
+ (0x1D660, 'M', u'k'),
+ (0x1D661, 'M', u'l'),
+ (0x1D662, 'M', u'm'),
+ (0x1D663, 'M', u'n'),
+ (0x1D664, 'M', u'o'),
+ (0x1D665, 'M', u'p'),
+ (0x1D666, 'M', u'q'),
+ (0x1D667, 'M', u'r'),
+ (0x1D668, 'M', u's'),
+ (0x1D669, 'M', u't'),
+ (0x1D66A, 'M', u'u'),
+ (0x1D66B, 'M', u'v'),
+ (0x1D66C, 'M', u'w'),
+ (0x1D66D, 'M', u'x'),
+ (0x1D66E, 'M', u'y'),
+ (0x1D66F, 'M', u'z'),
+ (0x1D670, 'M', u'a'),
+ (0x1D671, 'M', u'b'),
+ (0x1D672, 'M', u'c'),
+ (0x1D673, 'M', u'd'),
+ (0x1D674, 'M', u'e'),
+ (0x1D675, 'M', u'f'),
+ (0x1D676, 'M', u'g'),
+ (0x1D677, 'M', u'h'),
+ (0x1D678, 'M', u'i'),
+ (0x1D679, 'M', u'j'),
+ (0x1D67A, 'M', u'k'),
+ (0x1D67B, 'M', u'l'),
+ (0x1D67C, 'M', u'm'),
+ (0x1D67D, 'M', u'n'),
+ (0x1D67E, 'M', u'o'),
+ (0x1D67F, 'M', u'p'),
+ (0x1D680, 'M', u'q'),
+ (0x1D681, 'M', u'r'),
+ (0x1D682, 'M', u's'),
+ (0x1D683, 'M', u't'),
+ (0x1D684, 'M', u'u'),
+ (0x1D685, 'M', u'v'),
+ (0x1D686, 'M', u'w'),
+ (0x1D687, 'M', u'x'),
+ (0x1D688, 'M', u'y'),
+ (0x1D689, 'M', u'z'),
+ (0x1D68A, 'M', u'a'),
+ (0x1D68B, 'M', u'b'),
+ (0x1D68C, 'M', u'c'),
+ (0x1D68D, 'M', u'd'),
+ (0x1D68E, 'M', u'e'),
+ (0x1D68F, 'M', u'f'),
+ (0x1D690, 'M', u'g'),
+ (0x1D691, 'M', u'h'),
+ (0x1D692, 'M', u'i'),
+ (0x1D693, 'M', u'j'),
+ (0x1D694, 'M', u'k'),
+ (0x1D695, 'M', u'l'),
+ (0x1D696, 'M', u'm'),
+ (0x1D697, 'M', u'n'),
+ (0x1D698, 'M', u'o'),
+ (0x1D699, 'M', u'p'),
+ (0x1D69A, 'M', u'q'),
+ (0x1D69B, 'M', u'r'),
+ (0x1D69C, 'M', u's'),
+ (0x1D69D, 'M', u't'),
+ (0x1D69E, 'M', u'u'),
+ (0x1D69F, 'M', u'v'),
+ (0x1D6A0, 'M', u'w'),
+ (0x1D6A1, 'M', u'x'),
+ (0x1D6A2, 'M', u'y'),
+ (0x1D6A3, 'M', u'z'),
+ (0x1D6A4, 'M', u'ı'),
+ (0x1D6A5, 'M', u'ȷ'),
+ (0x1D6A6, 'X'),
+ (0x1D6A8, 'M', u'α'),
+ (0x1D6A9, 'M', u'β'),
+ (0x1D6AA, 'M', u'γ'),
+ (0x1D6AB, 'M', u'δ'),
+ (0x1D6AC, 'M', u'ε'),
+ (0x1D6AD, 'M', u'ζ'),
+ (0x1D6AE, 'M', u'η'),
+ (0x1D6AF, 'M', u'θ'),
+ (0x1D6B0, 'M', u'ι'),
+ (0x1D6B1, 'M', u'κ'),
+ (0x1D6B2, 'M', u'λ'),
+ (0x1D6B3, 'M', u'μ'),
+ (0x1D6B4, 'M', u'ν'),
+ (0x1D6B5, 'M', u'ξ'),
+ (0x1D6B6, 'M', u'ο'),
+ (0x1D6B7, 'M', u'π'),
+ (0x1D6B8, 'M', u'ρ'),
+ (0x1D6B9, 'M', u'θ'),
+ (0x1D6BA, 'M', u'σ'),
+ (0x1D6BB, 'M', u'τ'),
]
-def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_65():
return [
- (0x1D68F, 'M', 'f'),
- (0x1D690, 'M', 'g'),
- (0x1D691, 'M', 'h'),
- (0x1D692, 'M', 'i'),
- (0x1D693, 'M', 'j'),
- (0x1D694, 'M', 'k'),
- (0x1D695, 'M', 'l'),
- (0x1D696, 'M', 'm'),
- (0x1D697, 'M', 'n'),
- (0x1D698, 'M', 'o'),
- (0x1D699, 'M', 'p'),
- (0x1D69A, 'M', 'q'),
- (0x1D69B, 'M', 'r'),
- (0x1D69C, 'M', 's'),
- (0x1D69D, 'M', 't'),
- (0x1D69E, 'M', 'u'),
- (0x1D69F, 'M', 'v'),
- (0x1D6A0, 'M', 'w'),
- (0x1D6A1, 'M', 'x'),
- (0x1D6A2, 'M', 'y'),
- (0x1D6A3, 'M', 'z'),
- (0x1D6A4, 'M', 'ı'),
- (0x1D6A5, 'M', 'ȷ'),
- (0x1D6A6, 'X'),
- (0x1D6A8, 'M', 'α'),
- (0x1D6A9, 'M', 'β'),
- (0x1D6AA, 'M', 'γ'),
- (0x1D6AB, 'M', 'δ'),
- (0x1D6AC, 'M', 'ε'),
- (0x1D6AD, 'M', 'ζ'),
- (0x1D6AE, 'M', 'η'),
- (0x1D6AF, 'M', 'θ'),
- (0x1D6B0, 'M', 'ι'),
- (0x1D6B1, 'M', 'κ'),
- (0x1D6B2, 'M', 'λ'),
- (0x1D6B3, 'M', 'μ'),
- (0x1D6B4, 'M', 'ν'),
- (0x1D6B5, 'M', 'ξ'),
- (0x1D6B6, 'M', 'ο'),
- (0x1D6B7, 'M', 'π'),
- (0x1D6B8, 'M', 'ρ'),
- (0x1D6B9, 'M', 'θ'),
- (0x1D6BA, 'M', 'σ'),
- (0x1D6BB, 'M', 'τ'),
- (0x1D6BC, 'M', 'υ'),
- (0x1D6BD, 'M', 'φ'),
- (0x1D6BE, 'M', 'χ'),
- (0x1D6BF, 'M', 'ψ'),
- (0x1D6C0, 'M', 'ω'),
- (0x1D6C1, 'M', '∇'),
- (0x1D6C2, 'M', 'α'),
- (0x1D6C3, 'M', 'β'),
- (0x1D6C4, 'M', 'γ'),
- (0x1D6C5, 'M', 'δ'),
- (0x1D6C6, 'M', 'ε'),
- (0x1D6C7, 'M', 'ζ'),
- (0x1D6C8, 'M', 'η'),
- (0x1D6C9, 'M', 'θ'),
- (0x1D6CA, 'M', 'ι'),
- (0x1D6CB, 'M', 'κ'),
- (0x1D6CC, 'M', 'λ'),
- (0x1D6CD, 'M', 'μ'),
- (0x1D6CE, 'M', 'ν'),
- (0x1D6CF, 'M', 'ξ'),
- (0x1D6D0, 'M', 'ο'),
- (0x1D6D1, 'M', 'π'),
- (0x1D6D2, 'M', 'ρ'),
- (0x1D6D3, 'M', 'σ'),
- (0x1D6D5, 'M', 'τ'),
- (0x1D6D6, 'M', 'υ'),
- (0x1D6D7, 'M', 'φ'),
- (0x1D6D8, 'M', 'χ'),
- (0x1D6D9, 'M', 'ψ'),
- (0x1D6DA, 'M', 'ω'),
- (0x1D6DB, 'M', '∂'),
- (0x1D6DC, 'M', 'ε'),
- (0x1D6DD, 'M', 'θ'),
- (0x1D6DE, 'M', 'κ'),
- (0x1D6DF, 'M', 'φ'),
- (0x1D6E0, 'M', 'ρ'),
- (0x1D6E1, 'M', 'π'),
- (0x1D6E2, 'M', 'α'),
- (0x1D6E3, 'M', 'β'),
- (0x1D6E4, 'M', 'γ'),
- (0x1D6E5, 'M', 'δ'),
- (0x1D6E6, 'M', 'ε'),
- (0x1D6E7, 'M', 'ζ'),
- (0x1D6E8, 'M', 'η'),
- (0x1D6E9, 'M', 'θ'),
- (0x1D6EA, 'M', 'ι'),
- (0x1D6EB, 'M', 'κ'),
- (0x1D6EC, 'M', 'λ'),
- (0x1D6ED, 'M', 'μ'),
- (0x1D6EE, 'M', 'ν'),
- (0x1D6EF, 'M', 'ξ'),
- (0x1D6F0, 'M', 'ο'),
- (0x1D6F1, 'M', 'π'),
- (0x1D6F2, 'M', 'ρ'),
- (0x1D6F3, 'M', 'θ'),
- (0x1D6F4, 'M', 'σ'),
+ (0x1D6BC, 'M', u'υ'),
+ (0x1D6BD, 'M', u'φ'),
+ (0x1D6BE, 'M', u'χ'),
+ (0x1D6BF, 'M', u'ψ'),
+ (0x1D6C0, 'M', u'ω'),
+ (0x1D6C1, 'M', u'∇'),
+ (0x1D6C2, 'M', u'α'),
+ (0x1D6C3, 'M', u'β'),
+ (0x1D6C4, 'M', u'γ'),
+ (0x1D6C5, 'M', u'δ'),
+ (0x1D6C6, 'M', u'ε'),
+ (0x1D6C7, 'M', u'ζ'),
+ (0x1D6C8, 'M', u'η'),
+ (0x1D6C9, 'M', u'θ'),
+ (0x1D6CA, 'M', u'ι'),
+ (0x1D6CB, 'M', u'κ'),
+ (0x1D6CC, 'M', u'λ'),
+ (0x1D6CD, 'M', u'μ'),
+ (0x1D6CE, 'M', u'ν'),
+ (0x1D6CF, 'M', u'ξ'),
+ (0x1D6D0, 'M', u'ο'),
+ (0x1D6D1, 'M', u'π'),
+ (0x1D6D2, 'M', u'ρ'),
+ (0x1D6D3, 'M', u'σ'),
+ (0x1D6D5, 'M', u'τ'),
+ (0x1D6D6, 'M', u'υ'),
+ (0x1D6D7, 'M', u'φ'),
+ (0x1D6D8, 'M', u'χ'),
+ (0x1D6D9, 'M', u'ψ'),
+ (0x1D6DA, 'M', u'ω'),
+ (0x1D6DB, 'M', u'∂'),
+ (0x1D6DC, 'M', u'ε'),
+ (0x1D6DD, 'M', u'θ'),
+ (0x1D6DE, 'M', u'κ'),
+ (0x1D6DF, 'M', u'φ'),
+ (0x1D6E0, 'M', u'ρ'),
+ (0x1D6E1, 'M', u'π'),
+ (0x1D6E2, 'M', u'α'),
+ (0x1D6E3, 'M', u'β'),
+ (0x1D6E4, 'M', u'γ'),
+ (0x1D6E5, 'M', u'δ'),
+ (0x1D6E6, 'M', u'ε'),
+ (0x1D6E7, 'M', u'ζ'),
+ (0x1D6E8, 'M', u'η'),
+ (0x1D6E9, 'M', u'θ'),
+ (0x1D6EA, 'M', u'ι'),
+ (0x1D6EB, 'M', u'κ'),
+ (0x1D6EC, 'M', u'λ'),
+ (0x1D6ED, 'M', u'μ'),
+ (0x1D6EE, 'M', u'ν'),
+ (0x1D6EF, 'M', u'ξ'),
+ (0x1D6F0, 'M', u'ο'),
+ (0x1D6F1, 'M', u'π'),
+ (0x1D6F2, 'M', u'ρ'),
+ (0x1D6F3, 'M', u'θ'),
+ (0x1D6F4, 'M', u'σ'),
+ (0x1D6F5, 'M', u'τ'),
+ (0x1D6F6, 'M', u'υ'),
+ (0x1D6F7, 'M', u'φ'),
+ (0x1D6F8, 'M', u'χ'),
+ (0x1D6F9, 'M', u'ψ'),
+ (0x1D6FA, 'M', u'ω'),
+ (0x1D6FB, 'M', u'∇'),
+ (0x1D6FC, 'M', u'α'),
+ (0x1D6FD, 'M', u'β'),
+ (0x1D6FE, 'M', u'γ'),
+ (0x1D6FF, 'M', u'δ'),
+ (0x1D700, 'M', u'ε'),
+ (0x1D701, 'M', u'ζ'),
+ (0x1D702, 'M', u'η'),
+ (0x1D703, 'M', u'θ'),
+ (0x1D704, 'M', u'ι'),
+ (0x1D705, 'M', u'κ'),
+ (0x1D706, 'M', u'λ'),
+ (0x1D707, 'M', u'μ'),
+ (0x1D708, 'M', u'ν'),
+ (0x1D709, 'M', u'ξ'),
+ (0x1D70A, 'M', u'ο'),
+ (0x1D70B, 'M', u'π'),
+ (0x1D70C, 'M', u'ρ'),
+ (0x1D70D, 'M', u'σ'),
+ (0x1D70F, 'M', u'τ'),
+ (0x1D710, 'M', u'υ'),
+ (0x1D711, 'M', u'φ'),
+ (0x1D712, 'M', u'χ'),
+ (0x1D713, 'M', u'ψ'),
+ (0x1D714, 'M', u'ω'),
+ (0x1D715, 'M', u'∂'),
+ (0x1D716, 'M', u'ε'),
+ (0x1D717, 'M', u'θ'),
+ (0x1D718, 'M', u'κ'),
+ (0x1D719, 'M', u'φ'),
+ (0x1D71A, 'M', u'ρ'),
+ (0x1D71B, 'M', u'π'),
+ (0x1D71C, 'M', u'α'),
+ (0x1D71D, 'M', u'β'),
+ (0x1D71E, 'M', u'γ'),
+ (0x1D71F, 'M', u'δ'),
+ (0x1D720, 'M', u'ε'),
+ (0x1D721, 'M', u'ζ'),
]
-def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_66():
return [
- (0x1D6F5, 'M', 'τ'),
- (0x1D6F6, 'M', 'υ'),
- (0x1D6F7, 'M', 'φ'),
- (0x1D6F8, 'M', 'χ'),
- (0x1D6F9, 'M', 'ψ'),
- (0x1D6FA, 'M', 'ω'),
- (0x1D6FB, 'M', '∇'),
- (0x1D6FC, 'M', 'α'),
- (0x1D6FD, 'M', 'β'),
- (0x1D6FE, 'M', 'γ'),
- (0x1D6FF, 'M', 'δ'),
- (0x1D700, 'M', 'ε'),
- (0x1D701, 'M', 'ζ'),
- (0x1D702, 'M', 'η'),
- (0x1D703, 'M', 'θ'),
- (0x1D704, 'M', 'ι'),
- (0x1D705, 'M', 'κ'),
- (0x1D706, 'M', 'λ'),
- (0x1D707, 'M', 'μ'),
- (0x1D708, 'M', 'ν'),
- (0x1D709, 'M', 'ξ'),
- (0x1D70A, 'M', 'ο'),
- (0x1D70B, 'M', 'π'),
- (0x1D70C, 'M', 'ρ'),
- (0x1D70D, 'M', 'σ'),
- (0x1D70F, 'M', 'τ'),
- (0x1D710, 'M', 'υ'),
- (0x1D711, 'M', 'φ'),
- (0x1D712, 'M', 'χ'),
- (0x1D713, 'M', 'ψ'),
- (0x1D714, 'M', 'ω'),
- (0x1D715, 'M', '∂'),
- (0x1D716, 'M', 'ε'),
- (0x1D717, 'M', 'θ'),
- (0x1D718, 'M', 'κ'),
- (0x1D719, 'M', 'φ'),
- (0x1D71A, 'M', 'ρ'),
- (0x1D71B, 'M', 'π'),
- (0x1D71C, 'M', 'α'),
- (0x1D71D, 'M', 'β'),
- (0x1D71E, 'M', 'γ'),
- (0x1D71F, 'M', 'δ'),
- (0x1D720, 'M', 'ε'),
- (0x1D721, 'M', 'ζ'),
- (0x1D722, 'M', 'η'),
- (0x1D723, 'M', 'θ'),
- (0x1D724, 'M', 'ι'),
- (0x1D725, 'M', 'κ'),
- (0x1D726, 'M', 'λ'),
- (0x1D727, 'M', 'μ'),
- (0x1D728, 'M', 'ν'),
- (0x1D729, 'M', 'ξ'),
- (0x1D72A, 'M', 'ο'),
- (0x1D72B, 'M', 'π'),
- (0x1D72C, 'M', 'ρ'),
- (0x1D72D, 'M', 'θ'),
- (0x1D72E, 'M', 'σ'),
- (0x1D72F, 'M', 'τ'),
- (0x1D730, 'M', 'υ'),
- (0x1D731, 'M', 'φ'),
- (0x1D732, 'M', 'χ'),
- (0x1D733, 'M', 'ψ'),
- (0x1D734, 'M', 'ω'),
- (0x1D735, 'M', '∇'),
- (0x1D736, 'M', 'α'),
- (0x1D737, 'M', 'β'),
- (0x1D738, 'M', 'γ'),
- (0x1D739, 'M', 'δ'),
- (0x1D73A, 'M', 'ε'),
- (0x1D73B, 'M', 'ζ'),
- (0x1D73C, 'M', 'η'),
- (0x1D73D, 'M', 'θ'),
- (0x1D73E, 'M', 'ι'),
- (0x1D73F, 'M', 'κ'),
- (0x1D740, 'M', 'λ'),
- (0x1D741, 'M', 'μ'),
- (0x1D742, 'M', 'ν'),
- (0x1D743, 'M', 'ξ'),
- (0x1D744, 'M', 'ο'),
- (0x1D745, 'M', 'π'),
- (0x1D746, 'M', 'ρ'),
- (0x1D747, 'M', 'σ'),
- (0x1D749, 'M', 'τ'),
- (0x1D74A, 'M', 'υ'),
- (0x1D74B, 'M', 'φ'),
- (0x1D74C, 'M', 'χ'),
- (0x1D74D, 'M', 'ψ'),
- (0x1D74E, 'M', 'ω'),
- (0x1D74F, 'M', '∂'),
- (0x1D750, 'M', 'ε'),
- (0x1D751, 'M', 'θ'),
- (0x1D752, 'M', 'κ'),
- (0x1D753, 'M', 'φ'),
- (0x1D754, 'M', 'ρ'),
- (0x1D755, 'M', 'π'),
- (0x1D756, 'M', 'α'),
- (0x1D757, 'M', 'β'),
- (0x1D758, 'M', 'γ'),
- (0x1D759, 'M', 'δ'),
- (0x1D75A, 'M', 'ε'),
+ (0x1D722, 'M', u'η'),
+ (0x1D723, 'M', u'θ'),
+ (0x1D724, 'M', u'ι'),
+ (0x1D725, 'M', u'κ'),
+ (0x1D726, 'M', u'λ'),
+ (0x1D727, 'M', u'μ'),
+ (0x1D728, 'M', u'ν'),
+ (0x1D729, 'M', u'ξ'),
+ (0x1D72A, 'M', u'ο'),
+ (0x1D72B, 'M', u'π'),
+ (0x1D72C, 'M', u'ρ'),
+ (0x1D72D, 'M', u'θ'),
+ (0x1D72E, 'M', u'σ'),
+ (0x1D72F, 'M', u'τ'),
+ (0x1D730, 'M', u'υ'),
+ (0x1D731, 'M', u'φ'),
+ (0x1D732, 'M', u'χ'),
+ (0x1D733, 'M', u'ψ'),
+ (0x1D734, 'M', u'ω'),
+ (0x1D735, 'M', u'∇'),
+ (0x1D736, 'M', u'α'),
+ (0x1D737, 'M', u'β'),
+ (0x1D738, 'M', u'γ'),
+ (0x1D739, 'M', u'δ'),
+ (0x1D73A, 'M', u'ε'),
+ (0x1D73B, 'M', u'ζ'),
+ (0x1D73C, 'M', u'η'),
+ (0x1D73D, 'M', u'θ'),
+ (0x1D73E, 'M', u'ι'),
+ (0x1D73F, 'M', u'κ'),
+ (0x1D740, 'M', u'λ'),
+ (0x1D741, 'M', u'μ'),
+ (0x1D742, 'M', u'ν'),
+ (0x1D743, 'M', u'ξ'),
+ (0x1D744, 'M', u'ο'),
+ (0x1D745, 'M', u'π'),
+ (0x1D746, 'M', u'ρ'),
+ (0x1D747, 'M', u'σ'),
+ (0x1D749, 'M', u'τ'),
+ (0x1D74A, 'M', u'υ'),
+ (0x1D74B, 'M', u'φ'),
+ (0x1D74C, 'M', u'χ'),
+ (0x1D74D, 'M', u'ψ'),
+ (0x1D74E, 'M', u'ω'),
+ (0x1D74F, 'M', u'∂'),
+ (0x1D750, 'M', u'ε'),
+ (0x1D751, 'M', u'θ'),
+ (0x1D752, 'M', u'κ'),
+ (0x1D753, 'M', u'φ'),
+ (0x1D754, 'M', u'ρ'),
+ (0x1D755, 'M', u'π'),
+ (0x1D756, 'M', u'α'),
+ (0x1D757, 'M', u'β'),
+ (0x1D758, 'M', u'γ'),
+ (0x1D759, 'M', u'δ'),
+ (0x1D75A, 'M', u'ε'),
+ (0x1D75B, 'M', u'ζ'),
+ (0x1D75C, 'M', u'η'),
+ (0x1D75D, 'M', u'θ'),
+ (0x1D75E, 'M', u'ι'),
+ (0x1D75F, 'M', u'κ'),
+ (0x1D760, 'M', u'λ'),
+ (0x1D761, 'M', u'μ'),
+ (0x1D762, 'M', u'ν'),
+ (0x1D763, 'M', u'ξ'),
+ (0x1D764, 'M', u'ο'),
+ (0x1D765, 'M', u'π'),
+ (0x1D766, 'M', u'ρ'),
+ (0x1D767, 'M', u'θ'),
+ (0x1D768, 'M', u'σ'),
+ (0x1D769, 'M', u'τ'),
+ (0x1D76A, 'M', u'υ'),
+ (0x1D76B, 'M', u'φ'),
+ (0x1D76C, 'M', u'χ'),
+ (0x1D76D, 'M', u'ψ'),
+ (0x1D76E, 'M', u'ω'),
+ (0x1D76F, 'M', u'∇'),
+ (0x1D770, 'M', u'α'),
+ (0x1D771, 'M', u'β'),
+ (0x1D772, 'M', u'γ'),
+ (0x1D773, 'M', u'δ'),
+ (0x1D774, 'M', u'ε'),
+ (0x1D775, 'M', u'ζ'),
+ (0x1D776, 'M', u'η'),
+ (0x1D777, 'M', u'θ'),
+ (0x1D778, 'M', u'ι'),
+ (0x1D779, 'M', u'κ'),
+ (0x1D77A, 'M', u'λ'),
+ (0x1D77B, 'M', u'μ'),
+ (0x1D77C, 'M', u'ν'),
+ (0x1D77D, 'M', u'ξ'),
+ (0x1D77E, 'M', u'ο'),
+ (0x1D77F, 'M', u'π'),
+ (0x1D780, 'M', u'ρ'),
+ (0x1D781, 'M', u'σ'),
+ (0x1D783, 'M', u'τ'),
+ (0x1D784, 'M', u'υ'),
+ (0x1D785, 'M', u'φ'),
+ (0x1D786, 'M', u'χ'),
+ (0x1D787, 'M', u'ψ'),
]
-def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_67():
return [
- (0x1D75B, 'M', 'ζ'),
- (0x1D75C, 'M', 'η'),
- (0x1D75D, 'M', 'θ'),
- (0x1D75E, 'M', 'ι'),
- (0x1D75F, 'M', 'κ'),
- (0x1D760, 'M', 'λ'),
- (0x1D761, 'M', 'μ'),
- (0x1D762, 'M', 'ν'),
- (0x1D763, 'M', 'ξ'),
- (0x1D764, 'M', 'ο'),
- (0x1D765, 'M', 'π'),
- (0x1D766, 'M', 'ρ'),
- (0x1D767, 'M', 'θ'),
- (0x1D768, 'M', 'σ'),
- (0x1D769, 'M', 'τ'),
- (0x1D76A, 'M', 'υ'),
- (0x1D76B, 'M', 'φ'),
- (0x1D76C, 'M', 'χ'),
- (0x1D76D, 'M', 'ψ'),
- (0x1D76E, 'M', 'ω'),
- (0x1D76F, 'M', '∇'),
- (0x1D770, 'M', 'α'),
- (0x1D771, 'M', 'β'),
- (0x1D772, 'M', 'γ'),
- (0x1D773, 'M', 'δ'),
- (0x1D774, 'M', 'ε'),
- (0x1D775, 'M', 'ζ'),
- (0x1D776, 'M', 'η'),
- (0x1D777, 'M', 'θ'),
- (0x1D778, 'M', 'ι'),
- (0x1D779, 'M', 'κ'),
- (0x1D77A, 'M', 'λ'),
- (0x1D77B, 'M', 'μ'),
- (0x1D77C, 'M', 'ν'),
- (0x1D77D, 'M', 'ξ'),
- (0x1D77E, 'M', 'ο'),
- (0x1D77F, 'M', 'π'),
- (0x1D780, 'M', 'ρ'),
- (0x1D781, 'M', 'σ'),
- (0x1D783, 'M', 'τ'),
- (0x1D784, 'M', 'υ'),
- (0x1D785, 'M', 'φ'),
- (0x1D786, 'M', 'χ'),
- (0x1D787, 'M', 'ψ'),
- (0x1D788, 'M', 'ω'),
- (0x1D789, 'M', '∂'),
- (0x1D78A, 'M', 'ε'),
- (0x1D78B, 'M', 'θ'),
- (0x1D78C, 'M', 'κ'),
- (0x1D78D, 'M', 'φ'),
- (0x1D78E, 'M', 'ρ'),
- (0x1D78F, 'M', 'π'),
- (0x1D790, 'M', 'α'),
- (0x1D791, 'M', 'β'),
- (0x1D792, 'M', 'γ'),
- (0x1D793, 'M', 'δ'),
- (0x1D794, 'M', 'ε'),
- (0x1D795, 'M', 'ζ'),
- (0x1D796, 'M', 'η'),
- (0x1D797, 'M', 'θ'),
- (0x1D798, 'M', 'ι'),
- (0x1D799, 'M', 'κ'),
- (0x1D79A, 'M', 'λ'),
- (0x1D79B, 'M', 'μ'),
- (0x1D79C, 'M', 'ν'),
- (0x1D79D, 'M', 'ξ'),
- (0x1D79E, 'M', 'ο'),
- (0x1D79F, 'M', 'π'),
- (0x1D7A0, 'M', 'ρ'),
- (0x1D7A1, 'M', 'θ'),
- (0x1D7A2, 'M', 'σ'),
- (0x1D7A3, 'M', 'τ'),
- (0x1D7A4, 'M', 'υ'),
- (0x1D7A5, 'M', 'φ'),
- (0x1D7A6, 'M', 'χ'),
- (0x1D7A7, 'M', 'ψ'),
- (0x1D7A8, 'M', 'ω'),
- (0x1D7A9, 'M', '∇'),
- (0x1D7AA, 'M', 'α'),
- (0x1D7AB, 'M', 'β'),
- (0x1D7AC, 'M', 'γ'),
- (0x1D7AD, 'M', 'δ'),
- (0x1D7AE, 'M', 'ε'),
- (0x1D7AF, 'M', 'ζ'),
- (0x1D7B0, 'M', 'η'),
- (0x1D7B1, 'M', 'θ'),
- (0x1D7B2, 'M', 'ι'),
- (0x1D7B3, 'M', 'κ'),
- (0x1D7B4, 'M', 'λ'),
- (0x1D7B5, 'M', 'μ'),
- (0x1D7B6, 'M', 'ν'),
- (0x1D7B7, 'M', 'ξ'),
- (0x1D7B8, 'M', 'ο'),
- (0x1D7B9, 'M', 'π'),
- (0x1D7BA, 'M', 'ρ'),
- (0x1D7BB, 'M', 'σ'),
- (0x1D7BD, 'M', 'τ'),
- (0x1D7BE, 'M', 'υ'),
- (0x1D7BF, 'M', 'φ'),
- (0x1D7C0, 'M', 'χ'),
+ (0x1D788, 'M', u'ω'),
+ (0x1D789, 'M', u'∂'),
+ (0x1D78A, 'M', u'ε'),
+ (0x1D78B, 'M', u'θ'),
+ (0x1D78C, 'M', u'κ'),
+ (0x1D78D, 'M', u'φ'),
+ (0x1D78E, 'M', u'ρ'),
+ (0x1D78F, 'M', u'π'),
+ (0x1D790, 'M', u'α'),
+ (0x1D791, 'M', u'β'),
+ (0x1D792, 'M', u'γ'),
+ (0x1D793, 'M', u'δ'),
+ (0x1D794, 'M', u'ε'),
+ (0x1D795, 'M', u'ζ'),
+ (0x1D796, 'M', u'η'),
+ (0x1D797, 'M', u'θ'),
+ (0x1D798, 'M', u'ι'),
+ (0x1D799, 'M', u'κ'),
+ (0x1D79A, 'M', u'λ'),
+ (0x1D79B, 'M', u'μ'),
+ (0x1D79C, 'M', u'ν'),
+ (0x1D79D, 'M', u'ξ'),
+ (0x1D79E, 'M', u'ο'),
+ (0x1D79F, 'M', u'π'),
+ (0x1D7A0, 'M', u'ρ'),
+ (0x1D7A1, 'M', u'θ'),
+ (0x1D7A2, 'M', u'σ'),
+ (0x1D7A3, 'M', u'τ'),
+ (0x1D7A4, 'M', u'υ'),
+ (0x1D7A5, 'M', u'φ'),
+ (0x1D7A6, 'M', u'χ'),
+ (0x1D7A7, 'M', u'ψ'),
+ (0x1D7A8, 'M', u'ω'),
+ (0x1D7A9, 'M', u'∇'),
+ (0x1D7AA, 'M', u'α'),
+ (0x1D7AB, 'M', u'β'),
+ (0x1D7AC, 'M', u'γ'),
+ (0x1D7AD, 'M', u'δ'),
+ (0x1D7AE, 'M', u'ε'),
+ (0x1D7AF, 'M', u'ζ'),
+ (0x1D7B0, 'M', u'η'),
+ (0x1D7B1, 'M', u'θ'),
+ (0x1D7B2, 'M', u'ι'),
+ (0x1D7B3, 'M', u'κ'),
+ (0x1D7B4, 'M', u'λ'),
+ (0x1D7B5, 'M', u'μ'),
+ (0x1D7B6, 'M', u'ν'),
+ (0x1D7B7, 'M', u'ξ'),
+ (0x1D7B8, 'M', u'ο'),
+ (0x1D7B9, 'M', u'π'),
+ (0x1D7BA, 'M', u'ρ'),
+ (0x1D7BB, 'M', u'σ'),
+ (0x1D7BD, 'M', u'τ'),
+ (0x1D7BE, 'M', u'υ'),
+ (0x1D7BF, 'M', u'φ'),
+ (0x1D7C0, 'M', u'χ'),
+ (0x1D7C1, 'M', u'ψ'),
+ (0x1D7C2, 'M', u'ω'),
+ (0x1D7C3, 'M', u'∂'),
+ (0x1D7C4, 'M', u'ε'),
+ (0x1D7C5, 'M', u'θ'),
+ (0x1D7C6, 'M', u'κ'),
+ (0x1D7C7, 'M', u'φ'),
+ (0x1D7C8, 'M', u'ρ'),
+ (0x1D7C9, 'M', u'π'),
+ (0x1D7CA, 'M', u'ϝ'),
+ (0x1D7CC, 'X'),
+ (0x1D7CE, 'M', u'0'),
+ (0x1D7CF, 'M', u'1'),
+ (0x1D7D0, 'M', u'2'),
+ (0x1D7D1, 'M', u'3'),
+ (0x1D7D2, 'M', u'4'),
+ (0x1D7D3, 'M', u'5'),
+ (0x1D7D4, 'M', u'6'),
+ (0x1D7D5, 'M', u'7'),
+ (0x1D7D6, 'M', u'8'),
+ (0x1D7D7, 'M', u'9'),
+ (0x1D7D8, 'M', u'0'),
+ (0x1D7D9, 'M', u'1'),
+ (0x1D7DA, 'M', u'2'),
+ (0x1D7DB, 'M', u'3'),
+ (0x1D7DC, 'M', u'4'),
+ (0x1D7DD, 'M', u'5'),
+ (0x1D7DE, 'M', u'6'),
+ (0x1D7DF, 'M', u'7'),
+ (0x1D7E0, 'M', u'8'),
+ (0x1D7E1, 'M', u'9'),
+ (0x1D7E2, 'M', u'0'),
+ (0x1D7E3, 'M', u'1'),
+ (0x1D7E4, 'M', u'2'),
+ (0x1D7E5, 'M', u'3'),
+ (0x1D7E6, 'M', u'4'),
+ (0x1D7E7, 'M', u'5'),
+ (0x1D7E8, 'M', u'6'),
+ (0x1D7E9, 'M', u'7'),
+ (0x1D7EA, 'M', u'8'),
+ (0x1D7EB, 'M', u'9'),
+ (0x1D7EC, 'M', u'0'),
+ (0x1D7ED, 'M', u'1'),
+ (0x1D7EE, 'M', u'2'),
]
-def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_68():
return [
- (0x1D7C1, 'M', 'ψ'),
- (0x1D7C2, 'M', 'ω'),
- (0x1D7C3, 'M', '∂'),
- (0x1D7C4, 'M', 'ε'),
- (0x1D7C5, 'M', 'θ'),
- (0x1D7C6, 'M', 'κ'),
- (0x1D7C7, 'M', 'φ'),
- (0x1D7C8, 'M', 'ρ'),
- (0x1D7C9, 'M', 'π'),
- (0x1D7CA, 'M', 'ϝ'),
- (0x1D7CC, 'X'),
- (0x1D7CE, 'M', '0'),
- (0x1D7CF, 'M', '1'),
- (0x1D7D0, 'M', '2'),
- (0x1D7D1, 'M', '3'),
- (0x1D7D2, 'M', '4'),
- (0x1D7D3, 'M', '5'),
- (0x1D7D4, 'M', '6'),
- (0x1D7D5, 'M', '7'),
- (0x1D7D6, 'M', '8'),
- (0x1D7D7, 'M', '9'),
- (0x1D7D8, 'M', '0'),
- (0x1D7D9, 'M', '1'),
- (0x1D7DA, 'M', '2'),
- (0x1D7DB, 'M', '3'),
- (0x1D7DC, 'M', '4'),
- (0x1D7DD, 'M', '5'),
- (0x1D7DE, 'M', '6'),
- (0x1D7DF, 'M', '7'),
- (0x1D7E0, 'M', '8'),
- (0x1D7E1, 'M', '9'),
- (0x1D7E2, 'M', '0'),
- (0x1D7E3, 'M', '1'),
- (0x1D7E4, 'M', '2'),
- (0x1D7E5, 'M', '3'),
- (0x1D7E6, 'M', '4'),
- (0x1D7E7, 'M', '5'),
- (0x1D7E8, 'M', '6'),
- (0x1D7E9, 'M', '7'),
- (0x1D7EA, 'M', '8'),
- (0x1D7EB, 'M', '9'),
- (0x1D7EC, 'M', '0'),
- (0x1D7ED, 'M', '1'),
- (0x1D7EE, 'M', '2'),
- (0x1D7EF, 'M', '3'),
- (0x1D7F0, 'M', '4'),
- (0x1D7F1, 'M', '5'),
- (0x1D7F2, 'M', '6'),
- (0x1D7F3, 'M', '7'),
- (0x1D7F4, 'M', '8'),
- (0x1D7F5, 'M', '9'),
- (0x1D7F6, 'M', '0'),
- (0x1D7F7, 'M', '1'),
- (0x1D7F8, 'M', '2'),
- (0x1D7F9, 'M', '3'),
- (0x1D7FA, 'M', '4'),
- (0x1D7FB, 'M', '5'),
- (0x1D7FC, 'M', '6'),
- (0x1D7FD, 'M', '7'),
- (0x1D7FE, 'M', '8'),
- (0x1D7FF, 'M', '9'),
+ (0x1D7EF, 'M', u'3'),
+ (0x1D7F0, 'M', u'4'),
+ (0x1D7F1, 'M', u'5'),
+ (0x1D7F2, 'M', u'6'),
+ (0x1D7F3, 'M', u'7'),
+ (0x1D7F4, 'M', u'8'),
+ (0x1D7F5, 'M', u'9'),
+ (0x1D7F6, 'M', u'0'),
+ (0x1D7F7, 'M', u'1'),
+ (0x1D7F8, 'M', u'2'),
+ (0x1D7F9, 'M', u'3'),
+ (0x1D7FA, 'M', u'4'),
+ (0x1D7FB, 'M', u'5'),
+ (0x1D7FC, 'M', u'6'),
+ (0x1D7FD, 'M', u'7'),
+ (0x1D7FE, 'M', u'8'),
+ (0x1D7FF, 'M', u'9'),
(0x1D800, 'V'),
(0x1DA8C, 'X'),
(0x1DA9B, 'V'),
(0x1DAA0, 'X'),
(0x1DAA1, 'V'),
(0x1DAB0, 'X'),
- (0x1DF00, 'V'),
- (0x1DF1F, 'X'),
(0x1E000, 'V'),
(0x1E007, 'X'),
(0x1E008, 'V'),
@@ -7369,261 +7112,233 @@ def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x1E025, 'X'),
(0x1E026, 'V'),
(0x1E02B, 'X'),
- (0x1E100, 'V'),
- (0x1E12D, 'X'),
- (0x1E130, 'V'),
- (0x1E13E, 'X'),
- (0x1E140, 'V'),
- (0x1E14A, 'X'),
- (0x1E14E, 'V'),
- (0x1E150, 'X'),
- (0x1E290, 'V'),
- (0x1E2AF, 'X'),
- (0x1E2C0, 'V'),
- (0x1E2FA, 'X'),
- (0x1E2FF, 'V'),
- (0x1E300, 'X'),
- (0x1E7E0, 'V'),
- (0x1E7E7, 'X'),
- (0x1E7E8, 'V'),
- (0x1E7EC, 'X'),
- (0x1E7ED, 'V'),
- (0x1E7EF, 'X'),
- (0x1E7F0, 'V'),
- ]
-
-def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x1E7FF, 'X'),
(0x1E800, 'V'),
(0x1E8C5, 'X'),
(0x1E8C7, 'V'),
(0x1E8D7, 'X'),
- (0x1E900, 'M', '𞤢'),
- (0x1E901, 'M', '𞤣'),
- (0x1E902, 'M', '𞤤'),
- (0x1E903, 'M', '𞤥'),
- (0x1E904, 'M', '𞤦'),
- (0x1E905, 'M', '𞤧'),
- (0x1E906, 'M', '𞤨'),
- (0x1E907, 'M', '𞤩'),
- (0x1E908, 'M', '𞤪'),
- (0x1E909, 'M', '𞤫'),
- (0x1E90A, 'M', '𞤬'),
- (0x1E90B, 'M', '𞤭'),
- (0x1E90C, 'M', '𞤮'),
- (0x1E90D, 'M', '𞤯'),
- (0x1E90E, 'M', '𞤰'),
- (0x1E90F, 'M', '𞤱'),
- (0x1E910, 'M', '𞤲'),
- (0x1E911, 'M', '𞤳'),
- (0x1E912, 'M', '𞤴'),
- (0x1E913, 'M', '𞤵'),
- (0x1E914, 'M', '𞤶'),
- (0x1E915, 'M', '𞤷'),
- (0x1E916, 'M', '𞤸'),
- (0x1E917, 'M', '𞤹'),
- (0x1E918, 'M', '𞤺'),
- (0x1E919, 'M', '𞤻'),
- (0x1E91A, 'M', '𞤼'),
- (0x1E91B, 'M', '𞤽'),
- (0x1E91C, 'M', '𞤾'),
- (0x1E91D, 'M', '𞤿'),
- (0x1E91E, 'M', '𞥀'),
- (0x1E91F, 'M', '𞥁'),
- (0x1E920, 'M', '𞥂'),
- (0x1E921, 'M', '𞥃'),
+ (0x1E900, 'M', u'𞤢'),
+ (0x1E901, 'M', u'𞤣'),
+ (0x1E902, 'M', u'𞤤'),
+ (0x1E903, 'M', u'𞤥'),
+ (0x1E904, 'M', u'𞤦'),
+ (0x1E905, 'M', u'𞤧'),
+ (0x1E906, 'M', u'𞤨'),
+ (0x1E907, 'M', u'𞤩'),
+ (0x1E908, 'M', u'𞤪'),
+ (0x1E909, 'M', u'𞤫'),
+ (0x1E90A, 'M', u'𞤬'),
+ (0x1E90B, 'M', u'𞤭'),
+ (0x1E90C, 'M', u'𞤮'),
+ (0x1E90D, 'M', u'𞤯'),
+ (0x1E90E, 'M', u'𞤰'),
+ (0x1E90F, 'M', u'𞤱'),
+ (0x1E910, 'M', u'𞤲'),
+ (0x1E911, 'M', u'𞤳'),
+ (0x1E912, 'M', u'𞤴'),
+ (0x1E913, 'M', u'𞤵'),
+ (0x1E914, 'M', u'𞤶'),
+ (0x1E915, 'M', u'𞤷'),
+ (0x1E916, 'M', u'𞤸'),
+ (0x1E917, 'M', u'𞤹'),
+ (0x1E918, 'M', u'𞤺'),
+ (0x1E919, 'M', u'𞤻'),
+ (0x1E91A, 'M', u'𞤼'),
+ (0x1E91B, 'M', u'𞤽'),
+ (0x1E91C, 'M', u'𞤾'),
+ (0x1E91D, 'M', u'𞤿'),
+ (0x1E91E, 'M', u'𞥀'),
+ (0x1E91F, 'M', u'𞥁'),
+ (0x1E920, 'M', u'𞥂'),
+ (0x1E921, 'M', u'𞥃'),
(0x1E922, 'V'),
- (0x1E94C, 'X'),
+ (0x1E94B, 'X'),
(0x1E950, 'V'),
(0x1E95A, 'X'),
(0x1E95E, 'V'),
(0x1E960, 'X'),
(0x1EC71, 'V'),
(0x1ECB5, 'X'),
- (0x1ED01, 'V'),
- (0x1ED3E, 'X'),
- (0x1EE00, 'M', 'ا'),
- (0x1EE01, 'M', 'ب'),
- (0x1EE02, 'M', 'ج'),
- (0x1EE03, 'M', 'د'),
+ (0x1EE00, 'M', u'ا'),
+ (0x1EE01, 'M', u'ب'),
+ (0x1EE02, 'M', u'ج'),
+ (0x1EE03, 'M', u'د'),
(0x1EE04, 'X'),
- (0x1EE05, 'M', 'و'),
- (0x1EE06, 'M', 'ز'),
- (0x1EE07, 'M', 'ح'),
- (0x1EE08, 'M', 'ط'),
- (0x1EE09, 'M', 'ي'),
- (0x1EE0A, 'M', 'ك'),
- (0x1EE0B, 'M', 'ل'),
- (0x1EE0C, 'M', 'م'),
- (0x1EE0D, 'M', 'ن'),
- (0x1EE0E, 'M', 'س'),
- (0x1EE0F, 'M', 'ع'),
- (0x1EE10, 'M', 'ف'),
- (0x1EE11, 'M', 'ص'),
- (0x1EE12, 'M', 'ق'),
- (0x1EE13, 'M', 'ر'),
- (0x1EE14, 'M', 'ش'),
- (0x1EE15, 'M', 'ت'),
- (0x1EE16, 'M', 'ث'),
- (0x1EE17, 'M', 'خ'),
- (0x1EE18, 'M', 'ذ'),
- (0x1EE19, 'M', 'ض'),
- (0x1EE1A, 'M', 'ظ'),
- (0x1EE1B, 'M', 'غ'),
- (0x1EE1C, 'M', 'ٮ'),
- (0x1EE1D, 'M', 'ں'),
- (0x1EE1E, 'M', 'ڡ'),
- (0x1EE1F, 'M', 'ٯ'),
+ (0x1EE05, 'M', u'و'),
+ (0x1EE06, 'M', u'ز'),
+ (0x1EE07, 'M', u'ح'),
+ (0x1EE08, 'M', u'ط'),
+ (0x1EE09, 'M', u'ي'),
+ (0x1EE0A, 'M', u'ك'),
+ (0x1EE0B, 'M', u'ل'),
+ (0x1EE0C, 'M', u'م'),
+ (0x1EE0D, 'M', u'ن'),
+ (0x1EE0E, 'M', u'س'),
+ (0x1EE0F, 'M', u'ع'),
+ (0x1EE10, 'M', u'ف'),
+ (0x1EE11, 'M', u'ص'),
+ (0x1EE12, 'M', u'ق'),
+ (0x1EE13, 'M', u'ر'),
+ (0x1EE14, 'M', u'ش'),
+ ]
+
+def _seg_69():
+ return [
+ (0x1EE15, 'M', u'ت'),
+ (0x1EE16, 'M', u'ث'),
+ (0x1EE17, 'M', u'خ'),
+ (0x1EE18, 'M', u'ذ'),
+ (0x1EE19, 'M', u'ض'),
+ (0x1EE1A, 'M', u'ظ'),
+ (0x1EE1B, 'M', u'غ'),
+ (0x1EE1C, 'M', u'ٮ'),
+ (0x1EE1D, 'M', u'ں'),
+ (0x1EE1E, 'M', u'ڡ'),
+ (0x1EE1F, 'M', u'ٯ'),
(0x1EE20, 'X'),
- (0x1EE21, 'M', 'ب'),
- (0x1EE22, 'M', 'ج'),
+ (0x1EE21, 'M', u'ب'),
+ (0x1EE22, 'M', u'ج'),
(0x1EE23, 'X'),
- (0x1EE24, 'M', 'ه'),
+ (0x1EE24, 'M', u'ه'),
(0x1EE25, 'X'),
- (0x1EE27, 'M', 'ح'),
+ (0x1EE27, 'M', u'ح'),
(0x1EE28, 'X'),
- (0x1EE29, 'M', 'ي'),
- (0x1EE2A, 'M', 'ك'),
- (0x1EE2B, 'M', 'ل'),
- (0x1EE2C, 'M', 'م'),
- (0x1EE2D, 'M', 'ن'),
- (0x1EE2E, 'M', 'س'),
- (0x1EE2F, 'M', 'ع'),
- (0x1EE30, 'M', 'ف'),
- (0x1EE31, 'M', 'ص'),
- (0x1EE32, 'M', 'ق'),
+ (0x1EE29, 'M', u'ي'),
+ (0x1EE2A, 'M', u'ك'),
+ (0x1EE2B, 'M', u'ل'),
+ (0x1EE2C, 'M', u'م'),
+ (0x1EE2D, 'M', u'ن'),
+ (0x1EE2E, 'M', u'س'),
+ (0x1EE2F, 'M', u'ع'),
+ (0x1EE30, 'M', u'ف'),
+ (0x1EE31, 'M', u'ص'),
+ (0x1EE32, 'M', u'ق'),
(0x1EE33, 'X'),
- ]
-
-def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
- (0x1EE34, 'M', 'ش'),
- (0x1EE35, 'M', 'ت'),
- (0x1EE36, 'M', 'ث'),
- (0x1EE37, 'M', 'خ'),
+ (0x1EE34, 'M', u'ش'),
+ (0x1EE35, 'M', u'ت'),
+ (0x1EE36, 'M', u'ث'),
+ (0x1EE37, 'M', u'خ'),
(0x1EE38, 'X'),
- (0x1EE39, 'M', 'ض'),
+ (0x1EE39, 'M', u'ض'),
(0x1EE3A, 'X'),
- (0x1EE3B, 'M', 'غ'),
+ (0x1EE3B, 'M', u'غ'),
(0x1EE3C, 'X'),
- (0x1EE42, 'M', 'ج'),
+ (0x1EE42, 'M', u'ج'),
(0x1EE43, 'X'),
- (0x1EE47, 'M', 'ح'),
+ (0x1EE47, 'M', u'ح'),
(0x1EE48, 'X'),
- (0x1EE49, 'M', 'ي'),
+ (0x1EE49, 'M', u'ي'),
(0x1EE4A, 'X'),
- (0x1EE4B, 'M', 'ل'),
+ (0x1EE4B, 'M', u'ل'),
(0x1EE4C, 'X'),
- (0x1EE4D, 'M', 'ن'),
- (0x1EE4E, 'M', 'س'),
- (0x1EE4F, 'M', 'ع'),
+ (0x1EE4D, 'M', u'ن'),
+ (0x1EE4E, 'M', u'س'),
+ (0x1EE4F, 'M', u'ع'),
(0x1EE50, 'X'),
- (0x1EE51, 'M', 'ص'),
- (0x1EE52, 'M', 'ق'),
+ (0x1EE51, 'M', u'ص'),
+ (0x1EE52, 'M', u'ق'),
(0x1EE53, 'X'),
- (0x1EE54, 'M', 'ش'),
+ (0x1EE54, 'M', u'ش'),
(0x1EE55, 'X'),
- (0x1EE57, 'M', 'خ'),
+ (0x1EE57, 'M', u'خ'),
(0x1EE58, 'X'),
- (0x1EE59, 'M', 'ض'),
+ (0x1EE59, 'M', u'ض'),
(0x1EE5A, 'X'),
- (0x1EE5B, 'M', 'غ'),
+ (0x1EE5B, 'M', u'غ'),
(0x1EE5C, 'X'),
- (0x1EE5D, 'M', 'ں'),
+ (0x1EE5D, 'M', u'ں'),
(0x1EE5E, 'X'),
- (0x1EE5F, 'M', 'ٯ'),
+ (0x1EE5F, 'M', u'ٯ'),
(0x1EE60, 'X'),
- (0x1EE61, 'M', 'ب'),
- (0x1EE62, 'M', 'ج'),
+ (0x1EE61, 'M', u'ب'),
+ (0x1EE62, 'M', u'ج'),
(0x1EE63, 'X'),
- (0x1EE64, 'M', 'ه'),
+ (0x1EE64, 'M', u'ه'),
(0x1EE65, 'X'),
- (0x1EE67, 'M', 'ح'),
- (0x1EE68, 'M', 'ط'),
- (0x1EE69, 'M', 'ي'),
- (0x1EE6A, 'M', 'ك'),
+ (0x1EE67, 'M', u'ح'),
+ (0x1EE68, 'M', u'ط'),
+ (0x1EE69, 'M', u'ي'),
+ (0x1EE6A, 'M', u'ك'),
(0x1EE6B, 'X'),
- (0x1EE6C, 'M', 'م'),
- (0x1EE6D, 'M', 'ن'),
- (0x1EE6E, 'M', 'س'),
- (0x1EE6F, 'M', 'ع'),
- (0x1EE70, 'M', 'ف'),
- (0x1EE71, 'M', 'ص'),
- (0x1EE72, 'M', 'ق'),
+ (0x1EE6C, 'M', u'م'),
+ (0x1EE6D, 'M', u'ن'),
+ (0x1EE6E, 'M', u'س'),
+ (0x1EE6F, 'M', u'ع'),
+ (0x1EE70, 'M', u'ف'),
+ (0x1EE71, 'M', u'ص'),
+ (0x1EE72, 'M', u'ق'),
(0x1EE73, 'X'),
- (0x1EE74, 'M', 'ش'),
- (0x1EE75, 'M', 'ت'),
- (0x1EE76, 'M', 'ث'),
- (0x1EE77, 'M', 'خ'),
+ (0x1EE74, 'M', u'ش'),
+ (0x1EE75, 'M', u'ت'),
+ (0x1EE76, 'M', u'ث'),
+ (0x1EE77, 'M', u'خ'),
(0x1EE78, 'X'),
- (0x1EE79, 'M', 'ض'),
- (0x1EE7A, 'M', 'ظ'),
- (0x1EE7B, 'M', 'غ'),
- (0x1EE7C, 'M', 'ٮ'),
+ (0x1EE79, 'M', u'ض'),
+ (0x1EE7A, 'M', u'ظ'),
+ (0x1EE7B, 'M', u'غ'),
+ (0x1EE7C, 'M', u'ٮ'),
(0x1EE7D, 'X'),
- (0x1EE7E, 'M', 'ڡ'),
+ (0x1EE7E, 'M', u'ڡ'),
(0x1EE7F, 'X'),
- (0x1EE80, 'M', 'ا'),
- (0x1EE81, 'M', 'ب'),
- (0x1EE82, 'M', 'ج'),
- (0x1EE83, 'M', 'د'),
- (0x1EE84, 'M', 'ه'),
- (0x1EE85, 'M', 'و'),
- (0x1EE86, 'M', 'ز'),
- (0x1EE87, 'M', 'ح'),
- (0x1EE88, 'M', 'ط'),
- (0x1EE89, 'M', 'ي'),
- (0x1EE8A, 'X'),
- (0x1EE8B, 'M', 'ل'),
- (0x1EE8C, 'M', 'م'),
- (0x1EE8D, 'M', 'ن'),
- (0x1EE8E, 'M', 'س'),
- (0x1EE8F, 'M', 'ع'),
- (0x1EE90, 'M', 'ف'),
- (0x1EE91, 'M', 'ص'),
- (0x1EE92, 'M', 'ق'),
- (0x1EE93, 'M', 'ر'),
- (0x1EE94, 'M', 'ش'),
- (0x1EE95, 'M', 'ت'),
- (0x1EE96, 'M', 'ث'),
- (0x1EE97, 'M', 'خ'),
- (0x1EE98, 'M', 'ذ'),
- (0x1EE99, 'M', 'ض'),
- (0x1EE9A, 'M', 'ظ'),
- (0x1EE9B, 'M', 'غ'),
- (0x1EE9C, 'X'),
- (0x1EEA1, 'M', 'ب'),
- (0x1EEA2, 'M', 'ج'),
- (0x1EEA3, 'M', 'د'),
- (0x1EEA4, 'X'),
- (0x1EEA5, 'M', 'و'),
+ (0x1EE80, 'M', u'ا'),
+ (0x1EE81, 'M', u'ب'),
+ (0x1EE82, 'M', u'ج'),
+ (0x1EE83, 'M', u'د'),
]
-def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_70():
return [
- (0x1EEA6, 'M', 'ز'),
- (0x1EEA7, 'M', 'ح'),
- (0x1EEA8, 'M', 'ط'),
- (0x1EEA9, 'M', 'ي'),
+ (0x1EE84, 'M', u'ه'),
+ (0x1EE85, 'M', u'و'),
+ (0x1EE86, 'M', u'ز'),
+ (0x1EE87, 'M', u'ح'),
+ (0x1EE88, 'M', u'ط'),
+ (0x1EE89, 'M', u'ي'),
+ (0x1EE8A, 'X'),
+ (0x1EE8B, 'M', u'ل'),
+ (0x1EE8C, 'M', u'م'),
+ (0x1EE8D, 'M', u'ن'),
+ (0x1EE8E, 'M', u'س'),
+ (0x1EE8F, 'M', u'ع'),
+ (0x1EE90, 'M', u'ف'),
+ (0x1EE91, 'M', u'ص'),
+ (0x1EE92, 'M', u'ق'),
+ (0x1EE93, 'M', u'ر'),
+ (0x1EE94, 'M', u'ش'),
+ (0x1EE95, 'M', u'ت'),
+ (0x1EE96, 'M', u'ث'),
+ (0x1EE97, 'M', u'خ'),
+ (0x1EE98, 'M', u'ذ'),
+ (0x1EE99, 'M', u'ض'),
+ (0x1EE9A, 'M', u'ظ'),
+ (0x1EE9B, 'M', u'غ'),
+ (0x1EE9C, 'X'),
+ (0x1EEA1, 'M', u'ب'),
+ (0x1EEA2, 'M', u'ج'),
+ (0x1EEA3, 'M', u'د'),
+ (0x1EEA4, 'X'),
+ (0x1EEA5, 'M', u'و'),
+ (0x1EEA6, 'M', u'ز'),
+ (0x1EEA7, 'M', u'ح'),
+ (0x1EEA8, 'M', u'ط'),
+ (0x1EEA9, 'M', u'ي'),
(0x1EEAA, 'X'),
- (0x1EEAB, 'M', 'ل'),
- (0x1EEAC, 'M', 'م'),
- (0x1EEAD, 'M', 'ن'),
- (0x1EEAE, 'M', 'س'),
- (0x1EEAF, 'M', 'ع'),
- (0x1EEB0, 'M', 'ف'),
- (0x1EEB1, 'M', 'ص'),
- (0x1EEB2, 'M', 'ق'),
- (0x1EEB3, 'M', 'ر'),
- (0x1EEB4, 'M', 'ش'),
- (0x1EEB5, 'M', 'ت'),
- (0x1EEB6, 'M', 'ث'),
- (0x1EEB7, 'M', 'خ'),
- (0x1EEB8, 'M', 'ذ'),
- (0x1EEB9, 'M', 'ض'),
- (0x1EEBA, 'M', 'ظ'),
- (0x1EEBB, 'M', 'غ'),
+ (0x1EEAB, 'M', u'ل'),
+ (0x1EEAC, 'M', u'م'),
+ (0x1EEAD, 'M', u'ن'),
+ (0x1EEAE, 'M', u'س'),
+ (0x1EEAF, 'M', u'ع'),
+ (0x1EEB0, 'M', u'ف'),
+ (0x1EEB1, 'M', u'ص'),
+ (0x1EEB2, 'M', u'ق'),
+ (0x1EEB3, 'M', u'ر'),
+ (0x1EEB4, 'M', u'ش'),
+ (0x1EEB5, 'M', u'ت'),
+ (0x1EEB6, 'M', u'ث'),
+ (0x1EEB7, 'M', u'خ'),
+ (0x1EEB8, 'M', u'ذ'),
+ (0x1EEB9, 'M', u'ض'),
+ (0x1EEBA, 'M', u'ظ'),
+ (0x1EEBB, 'M', u'غ'),
(0x1EEBC, 'X'),
(0x1EEF0, 'V'),
(0x1EEF2, 'X'),
@@ -7639,177 +7354,174 @@ def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x1F0D0, 'X'),
(0x1F0D1, 'V'),
(0x1F0F6, 'X'),
- (0x1F101, '3', '0,'),
- (0x1F102, '3', '1,'),
- (0x1F103, '3', '2,'),
- (0x1F104, '3', '3,'),
- (0x1F105, '3', '4,'),
- (0x1F106, '3', '5,'),
- (0x1F107, '3', '6,'),
- (0x1F108, '3', '7,'),
- (0x1F109, '3', '8,'),
- (0x1F10A, '3', '9,'),
+ (0x1F101, '3', u'0,'),
+ (0x1F102, '3', u'1,'),
+ (0x1F103, '3', u'2,'),
+ (0x1F104, '3', u'3,'),
+ (0x1F105, '3', u'4,'),
+ (0x1F106, '3', u'5,'),
+ (0x1F107, '3', u'6,'),
+ (0x1F108, '3', u'7,'),
+ (0x1F109, '3', u'8,'),
+ (0x1F10A, '3', u'9,'),
(0x1F10B, 'V'),
- (0x1F110, '3', '(a)'),
- (0x1F111, '3', '(b)'),
- (0x1F112, '3', '(c)'),
- (0x1F113, '3', '(d)'),
- (0x1F114, '3', '(e)'),
- (0x1F115, '3', '(f)'),
- (0x1F116, '3', '(g)'),
- (0x1F117, '3', '(h)'),
- (0x1F118, '3', '(i)'),
- (0x1F119, '3', '(j)'),
- (0x1F11A, '3', '(k)'),
- (0x1F11B, '3', '(l)'),
- (0x1F11C, '3', '(m)'),
- (0x1F11D, '3', '(n)'),
- (0x1F11E, '3', '(o)'),
- (0x1F11F, '3', '(p)'),
- (0x1F120, '3', '(q)'),
- (0x1F121, '3', '(r)'),
- (0x1F122, '3', '(s)'),
- (0x1F123, '3', '(t)'),
- (0x1F124, '3', '(u)'),
- (0x1F125, '3', '(v)'),
- (0x1F126, '3', '(w)'),
- (0x1F127, '3', '(x)'),
- (0x1F128, '3', '(y)'),
- (0x1F129, '3', '(z)'),
- (0x1F12A, 'M', '〔s〕'),
- (0x1F12B, 'M', 'c'),
- (0x1F12C, 'M', 'r'),
- (0x1F12D, 'M', 'cd'),
- (0x1F12E, 'M', 'wz'),
- (0x1F12F, 'V'),
- (0x1F130, 'M', 'a'),
- (0x1F131, 'M', 'b'),
- (0x1F132, 'M', 'c'),
- (0x1F133, 'M', 'd'),
- (0x1F134, 'M', 'e'),
- (0x1F135, 'M', 'f'),
- (0x1F136, 'M', 'g'),
- (0x1F137, 'M', 'h'),
- (0x1F138, 'M', 'i'),
- (0x1F139, 'M', 'j'),
- (0x1F13A, 'M', 'k'),
- (0x1F13B, 'M', 'l'),
- (0x1F13C, 'M', 'm'),
- (0x1F13D, 'M', 'n'),
- (0x1F13E, 'M', 'o'),
- (0x1F13F, 'M', 'p'),
- (0x1F140, 'M', 'q'),
- (0x1F141, 'M', 'r'),
- (0x1F142, 'M', 's'),
- (0x1F143, 'M', 't'),
+ (0x1F10D, 'X'),
+ (0x1F110, '3', u'(a)'),
+ (0x1F111, '3', u'(b)'),
+ (0x1F112, '3', u'(c)'),
+ (0x1F113, '3', u'(d)'),
+ (0x1F114, '3', u'(e)'),
+ (0x1F115, '3', u'(f)'),
+ (0x1F116, '3', u'(g)'),
+ (0x1F117, '3', u'(h)'),
+ (0x1F118, '3', u'(i)'),
+ (0x1F119, '3', u'(j)'),
+ (0x1F11A, '3', u'(k)'),
+ (0x1F11B, '3', u'(l)'),
+ (0x1F11C, '3', u'(m)'),
+ (0x1F11D, '3', u'(n)'),
+ (0x1F11E, '3', u'(o)'),
+ (0x1F11F, '3', u'(p)'),
+ (0x1F120, '3', u'(q)'),
+ (0x1F121, '3', u'(r)'),
+ (0x1F122, '3', u'(s)'),
+ (0x1F123, '3', u'(t)'),
+ (0x1F124, '3', u'(u)'),
]
-def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_71():
return [
- (0x1F144, 'M', 'u'),
- (0x1F145, 'M', 'v'),
- (0x1F146, 'M', 'w'),
- (0x1F147, 'M', 'x'),
- (0x1F148, 'M', 'y'),
- (0x1F149, 'M', 'z'),
- (0x1F14A, 'M', 'hv'),
- (0x1F14B, 'M', 'mv'),
- (0x1F14C, 'M', 'sd'),
- (0x1F14D, 'M', 'ss'),
- (0x1F14E, 'M', 'ppv'),
- (0x1F14F, 'M', 'wc'),
+ (0x1F125, '3', u'(v)'),
+ (0x1F126, '3', u'(w)'),
+ (0x1F127, '3', u'(x)'),
+ (0x1F128, '3', u'(y)'),
+ (0x1F129, '3', u'(z)'),
+ (0x1F12A, 'M', u'〔s〕'),
+ (0x1F12B, 'M', u'c'),
+ (0x1F12C, 'M', u'r'),
+ (0x1F12D, 'M', u'cd'),
+ (0x1F12E, 'M', u'wz'),
+ (0x1F12F, 'V'),
+ (0x1F130, 'M', u'a'),
+ (0x1F131, 'M', u'b'),
+ (0x1F132, 'M', u'c'),
+ (0x1F133, 'M', u'd'),
+ (0x1F134, 'M', u'e'),
+ (0x1F135, 'M', u'f'),
+ (0x1F136, 'M', u'g'),
+ (0x1F137, 'M', u'h'),
+ (0x1F138, 'M', u'i'),
+ (0x1F139, 'M', u'j'),
+ (0x1F13A, 'M', u'k'),
+ (0x1F13B, 'M', u'l'),
+ (0x1F13C, 'M', u'm'),
+ (0x1F13D, 'M', u'n'),
+ (0x1F13E, 'M', u'o'),
+ (0x1F13F, 'M', u'p'),
+ (0x1F140, 'M', u'q'),
+ (0x1F141, 'M', u'r'),
+ (0x1F142, 'M', u's'),
+ (0x1F143, 'M', u't'),
+ (0x1F144, 'M', u'u'),
+ (0x1F145, 'M', u'v'),
+ (0x1F146, 'M', u'w'),
+ (0x1F147, 'M', u'x'),
+ (0x1F148, 'M', u'y'),
+ (0x1F149, 'M', u'z'),
+ (0x1F14A, 'M', u'hv'),
+ (0x1F14B, 'M', u'mv'),
+ (0x1F14C, 'M', u'sd'),
+ (0x1F14D, 'M', u'ss'),
+ (0x1F14E, 'M', u'ppv'),
+ (0x1F14F, 'M', u'wc'),
(0x1F150, 'V'),
- (0x1F16A, 'M', 'mc'),
- (0x1F16B, 'M', 'md'),
- (0x1F16C, 'M', 'mr'),
- (0x1F16D, 'V'),
- (0x1F190, 'M', 'dj'),
+ (0x1F16A, 'M', u'mc'),
+ (0x1F16B, 'M', u'md'),
+ (0x1F16C, 'X'),
+ (0x1F170, 'V'),
+ (0x1F190, 'M', u'dj'),
(0x1F191, 'V'),
- (0x1F1AE, 'X'),
+ (0x1F1AD, 'X'),
(0x1F1E6, 'V'),
- (0x1F200, 'M', 'ほか'),
- (0x1F201, 'M', 'ココ'),
- (0x1F202, 'M', 'サ'),
+ (0x1F200, 'M', u'ほか'),
+ (0x1F201, 'M', u'ココ'),
+ (0x1F202, 'M', u'サ'),
(0x1F203, 'X'),
- (0x1F210, 'M', '手'),
- (0x1F211, 'M', '字'),
- (0x1F212, 'M', '双'),
- (0x1F213, 'M', 'デ'),
- (0x1F214, 'M', '二'),
- (0x1F215, 'M', '多'),
- (0x1F216, 'M', '解'),
- (0x1F217, 'M', '天'),
- (0x1F218, 'M', '交'),
- (0x1F219, 'M', '映'),
- (0x1F21A, 'M', '無'),
- (0x1F21B, 'M', '料'),
- (0x1F21C, 'M', '前'),
- (0x1F21D, 'M', '後'),
- (0x1F21E, 'M', '再'),
- (0x1F21F, 'M', '新'),
- (0x1F220, 'M', '初'),
- (0x1F221, 'M', '終'),
- (0x1F222, 'M', '生'),
- (0x1F223, 'M', '販'),
- (0x1F224, 'M', '声'),
- (0x1F225, 'M', '吹'),
- (0x1F226, 'M', '演'),
- (0x1F227, 'M', '投'),
- (0x1F228, 'M', '捕'),
- (0x1F229, 'M', '一'),
- (0x1F22A, 'M', '三'),
- (0x1F22B, 'M', '遊'),
- (0x1F22C, 'M', '左'),
- (0x1F22D, 'M', '中'),
- (0x1F22E, 'M', '右'),
- (0x1F22F, 'M', '指'),
- (0x1F230, 'M', '走'),
- (0x1F231, 'M', '打'),
- (0x1F232, 'M', '禁'),
- (0x1F233, 'M', '空'),
- (0x1F234, 'M', '合'),
- (0x1F235, 'M', '満'),
- (0x1F236, 'M', '有'),
- (0x1F237, 'M', '月'),
- (0x1F238, 'M', '申'),
- (0x1F239, 'M', '割'),
- (0x1F23A, 'M', '営'),
- (0x1F23B, 'M', '配'),
+ (0x1F210, 'M', u'手'),
+ (0x1F211, 'M', u'字'),
+ (0x1F212, 'M', u'双'),
+ (0x1F213, 'M', u'デ'),
+ (0x1F214, 'M', u'二'),
+ (0x1F215, 'M', u'多'),
+ (0x1F216, 'M', u'解'),
+ (0x1F217, 'M', u'天'),
+ (0x1F218, 'M', u'交'),
+ (0x1F219, 'M', u'映'),
+ (0x1F21A, 'M', u'無'),
+ (0x1F21B, 'M', u'料'),
+ (0x1F21C, 'M', u'前'),
+ (0x1F21D, 'M', u'後'),
+ (0x1F21E, 'M', u'再'),
+ (0x1F21F, 'M', u'新'),
+ (0x1F220, 'M', u'初'),
+ (0x1F221, 'M', u'終'),
+ (0x1F222, 'M', u'生'),
+ (0x1F223, 'M', u'販'),
+ (0x1F224, 'M', u'声'),
+ (0x1F225, 'M', u'吹'),
+ (0x1F226, 'M', u'演'),
+ (0x1F227, 'M', u'投'),
+ (0x1F228, 'M', u'捕'),
+ (0x1F229, 'M', u'一'),
+ (0x1F22A, 'M', u'三'),
+ (0x1F22B, 'M', u'遊'),
+ (0x1F22C, 'M', u'左'),
+ (0x1F22D, 'M', u'中'),
+ (0x1F22E, 'M', u'右'),
+ (0x1F22F, 'M', u'指'),
+ (0x1F230, 'M', u'走'),
+ (0x1F231, 'M', u'打'),
+ (0x1F232, 'M', u'禁'),
+ (0x1F233, 'M', u'空'),
+ (0x1F234, 'M', u'合'),
+ (0x1F235, 'M', u'満'),
+ (0x1F236, 'M', u'有'),
+ (0x1F237, 'M', u'月'),
+ (0x1F238, 'M', u'申'),
+ (0x1F239, 'M', u'割'),
+ (0x1F23A, 'M', u'営'),
+ (0x1F23B, 'M', u'配'),
+ ]
+
+def _seg_72():
+ return [
(0x1F23C, 'X'),
- (0x1F240, 'M', '〔本〕'),
- (0x1F241, 'M', '〔三〕'),
- (0x1F242, 'M', '〔二〕'),
- (0x1F243, 'M', '〔安〕'),
- (0x1F244, 'M', '〔点〕'),
- (0x1F245, 'M', '〔打〕'),
- (0x1F246, 'M', '〔盗〕'),
- (0x1F247, 'M', '〔勝〕'),
- (0x1F248, 'M', '〔敗〕'),
+ (0x1F240, 'M', u'〔本〕'),
+ (0x1F241, 'M', u'〔三〕'),
+ (0x1F242, 'M', u'〔二〕'),
+ (0x1F243, 'M', u'〔安〕'),
+ (0x1F244, 'M', u'〔点〕'),
+ (0x1F245, 'M', u'〔打〕'),
+ (0x1F246, 'M', u'〔盗〕'),
+ (0x1F247, 'M', u'〔勝〕'),
+ (0x1F248, 'M', u'〔敗〕'),
(0x1F249, 'X'),
- (0x1F250, 'M', '得'),
- (0x1F251, 'M', '可'),
+ (0x1F250, 'M', u'得'),
+ (0x1F251, 'M', u'可'),
(0x1F252, 'X'),
(0x1F260, 'V'),
(0x1F266, 'X'),
(0x1F300, 'V'),
- (0x1F6D8, 'X'),
- (0x1F6DD, 'V'),
+ (0x1F6D5, 'X'),
+ (0x1F6E0, 'V'),
(0x1F6ED, 'X'),
(0x1F6F0, 'V'),
- (0x1F6FD, 'X'),
+ (0x1F6FA, 'X'),
(0x1F700, 'V'),
(0x1F774, 'X'),
(0x1F780, 'V'),
(0x1F7D9, 'X'),
- (0x1F7E0, 'V'),
- (0x1F7EC, 'X'),
- (0x1F7F0, 'V'),
- (0x1F7F1, 'X'),
(0x1F800, 'V'),
- ]
-
-def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
- return [
(0x1F80C, 'X'),
(0x1F810, 'V'),
(0x1F848, 'X'),
@@ -7819,611 +7531,594 @@ def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
(0x1F888, 'X'),
(0x1F890, 'V'),
(0x1F8AE, 'X'),
- (0x1F8B0, 'V'),
- (0x1F8B2, 'X'),
(0x1F900, 'V'),
- (0x1FA54, 'X'),
+ (0x1F90C, 'X'),
+ (0x1F910, 'V'),
+ (0x1F93F, 'X'),
+ (0x1F940, 'V'),
+ (0x1F971, 'X'),
+ (0x1F973, 'V'),
+ (0x1F977, 'X'),
+ (0x1F97A, 'V'),
+ (0x1F97B, 'X'),
+ (0x1F97C, 'V'),
+ (0x1F9A3, 'X'),
+ (0x1F9B0, 'V'),
+ (0x1F9BA, 'X'),
+ (0x1F9C0, 'V'),
+ (0x1F9C3, 'X'),
+ (0x1F9D0, 'V'),
+ (0x1FA00, 'X'),
(0x1FA60, 'V'),
(0x1FA6E, 'X'),
- (0x1FA70, 'V'),
- (0x1FA75, 'X'),
- (0x1FA78, 'V'),
- (0x1FA7D, 'X'),
- (0x1FA80, 'V'),
- (0x1FA87, 'X'),
- (0x1FA90, 'V'),
- (0x1FAAD, 'X'),
- (0x1FAB0, 'V'),
- (0x1FABB, 'X'),
- (0x1FAC0, 'V'),
- (0x1FAC6, 'X'),
- (0x1FAD0, 'V'),
- (0x1FADA, 'X'),
- (0x1FAE0, 'V'),
- (0x1FAE8, 'X'),
- (0x1FAF0, 'V'),
- (0x1FAF7, 'X'),
- (0x1FB00, 'V'),
- (0x1FB93, 'X'),
- (0x1FB94, 'V'),
- (0x1FBCB, 'X'),
- (0x1FBF0, 'M', '0'),
- (0x1FBF1, 'M', '1'),
- (0x1FBF2, 'M', '2'),
- (0x1FBF3, 'M', '3'),
- (0x1FBF4, 'M', '4'),
- (0x1FBF5, 'M', '5'),
- (0x1FBF6, 'M', '6'),
- (0x1FBF7, 'M', '7'),
- (0x1FBF8, 'M', '8'),
- (0x1FBF9, 'M', '9'),
- (0x1FBFA, 'X'),
(0x20000, 'V'),
- (0x2A6E0, 'X'),
+ (0x2A6D7, 'X'),
(0x2A700, 'V'),
- (0x2B739, 'X'),
+ (0x2B735, 'X'),
(0x2B740, 'V'),
(0x2B81E, 'X'),
(0x2B820, 'V'),
(0x2CEA2, 'X'),
(0x2CEB0, 'V'),
(0x2EBE1, 'X'),
- (0x2F800, 'M', '丽'),
- (0x2F801, 'M', '丸'),
- (0x2F802, 'M', '乁'),
- (0x2F803, 'M', '𠄢'),
- (0x2F804, 'M', '你'),
- (0x2F805, 'M', '侮'),
- (0x2F806, 'M', '侻'),
- (0x2F807, 'M', '倂'),
- (0x2F808, 'M', '偺'),
- (0x2F809, 'M', '備'),
- (0x2F80A, 'M', '僧'),
- (0x2F80B, 'M', '像'),
- (0x2F80C, 'M', '㒞'),
- (0x2F80D, 'M', '𠘺'),
- (0x2F80E, 'M', '免'),
- (0x2F80F, 'M', '兔'),
- (0x2F810, 'M', '兤'),
- (0x2F811, 'M', '具'),
- (0x2F812, 'M', '𠔜'),
- (0x2F813, 'M', '㒹'),
- (0x2F814, 'M', '內'),
- (0x2F815, 'M', '再'),
- (0x2F816, 'M', '𠕋'),
- (0x2F817, 'M', '冗'),
- (0x2F818, 'M', '冤'),
- (0x2F819, 'M', '仌'),
- (0x2F81A, 'M', '冬'),
- (0x2F81B, 'M', '况'),
- (0x2F81C, 'M', '𩇟'),
- (0x2F81D, 'M', '凵'),
- (0x2F81E, 'M', '刃'),
- (0x2F81F, 'M', '㓟'),
- (0x2F820, 'M', '刻'),
- (0x2F821, 'M', '剆'),
- (0x2F822, 'M', '割'),
- (0x2F823, 'M', '剷'),
- (0x2F824, 'M', '㔕'),
- (0x2F825, 'M', '勇'),
- (0x2F826, 'M', '勉'),
- (0x2F827, 'M', '勤'),
- (0x2F828, 'M', '勺'),
- (0x2F829, 'M', '包'),
+ (0x2F800, 'M', u'丽'),
+ (0x2F801, 'M', u'丸'),
+ (0x2F802, 'M', u'乁'),
+ (0x2F803, 'M', u'𠄢'),
+ (0x2F804, 'M', u'你'),
+ (0x2F805, 'M', u'侮'),
+ (0x2F806, 'M', u'侻'),
+ (0x2F807, 'M', u'倂'),
+ (0x2F808, 'M', u'偺'),
+ (0x2F809, 'M', u'備'),
+ (0x2F80A, 'M', u'僧'),
+ (0x2F80B, 'M', u'像'),
+ (0x2F80C, 'M', u'㒞'),
+ (0x2F80D, 'M', u'𠘺'),
+ (0x2F80E, 'M', u'免'),
+ (0x2F80F, 'M', u'兔'),
+ (0x2F810, 'M', u'兤'),
+ (0x2F811, 'M', u'具'),
+ (0x2F812, 'M', u'𠔜'),
+ (0x2F813, 'M', u'㒹'),
+ (0x2F814, 'M', u'內'),
+ (0x2F815, 'M', u'再'),
+ (0x2F816, 'M', u'𠕋'),
+ (0x2F817, 'M', u'冗'),
+ (0x2F818, 'M', u'冤'),
+ (0x2F819, 'M', u'仌'),
+ (0x2F81A, 'M', u'冬'),
+ (0x2F81B, 'M', u'况'),
+ (0x2F81C, 'M', u'𩇟'),
+ (0x2F81D, 'M', u'凵'),
+ (0x2F81E, 'M', u'刃'),
+ (0x2F81F, 'M', u'㓟'),
+ (0x2F820, 'M', u'刻'),
+ (0x2F821, 'M', u'剆'),
]
-def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_73():
return [
- (0x2F82A, 'M', '匆'),
- (0x2F82B, 'M', '北'),
- (0x2F82C, 'M', '卉'),
- (0x2F82D, 'M', '卑'),
- (0x2F82E, 'M', '博'),
- (0x2F82F, 'M', '即'),
- (0x2F830, 'M', '卽'),
- (0x2F831, 'M', '卿'),
- (0x2F834, 'M', '𠨬'),
- (0x2F835, 'M', '灰'),
- (0x2F836, 'M', '及'),
- (0x2F837, 'M', '叟'),
- (0x2F838, 'M', '𠭣'),
- (0x2F839, 'M', '叫'),
- (0x2F83A, 'M', '叱'),
- (0x2F83B, 'M', '吆'),
- (0x2F83C, 'M', '咞'),
- (0x2F83D, 'M', '吸'),
- (0x2F83E, 'M', '呈'),
- (0x2F83F, 'M', '周'),
- (0x2F840, 'M', '咢'),
- (0x2F841, 'M', '哶'),
- (0x2F842, 'M', '唐'),
- (0x2F843, 'M', '啓'),
- (0x2F844, 'M', '啣'),
- (0x2F845, 'M', '善'),
- (0x2F847, 'M', '喙'),
- (0x2F848, 'M', '喫'),
- (0x2F849, 'M', '喳'),
- (0x2F84A, 'M', '嗂'),
- (0x2F84B, 'M', '圖'),
- (0x2F84C, 'M', '嘆'),
- (0x2F84D, 'M', '圗'),
- (0x2F84E, 'M', '噑'),
- (0x2F84F, 'M', '噴'),
- (0x2F850, 'M', '切'),
- (0x2F851, 'M', '壮'),
- (0x2F852, 'M', '城'),
- (0x2F853, 'M', '埴'),
- (0x2F854, 'M', '堍'),
- (0x2F855, 'M', '型'),
- (0x2F856, 'M', '堲'),
- (0x2F857, 'M', '報'),
- (0x2F858, 'M', '墬'),
- (0x2F859, 'M', '𡓤'),
- (0x2F85A, 'M', '売'),
- (0x2F85B, 'M', '壷'),
- (0x2F85C, 'M', '夆'),
- (0x2F85D, 'M', '多'),
- (0x2F85E, 'M', '夢'),
- (0x2F85F, 'M', '奢'),
- (0x2F860, 'M', '𡚨'),
- (0x2F861, 'M', '𡛪'),
- (0x2F862, 'M', '姬'),
- (0x2F863, 'M', '娛'),
- (0x2F864, 'M', '娧'),
- (0x2F865, 'M', '姘'),
- (0x2F866, 'M', '婦'),
- (0x2F867, 'M', '㛮'),
+ (0x2F822, 'M', u'割'),
+ (0x2F823, 'M', u'剷'),
+ (0x2F824, 'M', u'㔕'),
+ (0x2F825, 'M', u'勇'),
+ (0x2F826, 'M', u'勉'),
+ (0x2F827, 'M', u'勤'),
+ (0x2F828, 'M', u'勺'),
+ (0x2F829, 'M', u'包'),
+ (0x2F82A, 'M', u'匆'),
+ (0x2F82B, 'M', u'北'),
+ (0x2F82C, 'M', u'卉'),
+ (0x2F82D, 'M', u'卑'),
+ (0x2F82E, 'M', u'博'),
+ (0x2F82F, 'M', u'即'),
+ (0x2F830, 'M', u'卽'),
+ (0x2F831, 'M', u'卿'),
+ (0x2F834, 'M', u'𠨬'),
+ (0x2F835, 'M', u'灰'),
+ (0x2F836, 'M', u'及'),
+ (0x2F837, 'M', u'叟'),
+ (0x2F838, 'M', u'𠭣'),
+ (0x2F839, 'M', u'叫'),
+ (0x2F83A, 'M', u'叱'),
+ (0x2F83B, 'M', u'吆'),
+ (0x2F83C, 'M', u'咞'),
+ (0x2F83D, 'M', u'吸'),
+ (0x2F83E, 'M', u'呈'),
+ (0x2F83F, 'M', u'周'),
+ (0x2F840, 'M', u'咢'),
+ (0x2F841, 'M', u'哶'),
+ (0x2F842, 'M', u'唐'),
+ (0x2F843, 'M', u'啓'),
+ (0x2F844, 'M', u'啣'),
+ (0x2F845, 'M', u'善'),
+ (0x2F847, 'M', u'喙'),
+ (0x2F848, 'M', u'喫'),
+ (0x2F849, 'M', u'喳'),
+ (0x2F84A, 'M', u'嗂'),
+ (0x2F84B, 'M', u'圖'),
+ (0x2F84C, 'M', u'嘆'),
+ (0x2F84D, 'M', u'圗'),
+ (0x2F84E, 'M', u'噑'),
+ (0x2F84F, 'M', u'噴'),
+ (0x2F850, 'M', u'切'),
+ (0x2F851, 'M', u'壮'),
+ (0x2F852, 'M', u'城'),
+ (0x2F853, 'M', u'埴'),
+ (0x2F854, 'M', u'堍'),
+ (0x2F855, 'M', u'型'),
+ (0x2F856, 'M', u'堲'),
+ (0x2F857, 'M', u'報'),
+ (0x2F858, 'M', u'墬'),
+ (0x2F859, 'M', u'𡓤'),
+ (0x2F85A, 'M', u'売'),
+ (0x2F85B, 'M', u'壷'),
+ (0x2F85C, 'M', u'夆'),
+ (0x2F85D, 'M', u'多'),
+ (0x2F85E, 'M', u'夢'),
+ (0x2F85F, 'M', u'奢'),
+ (0x2F860, 'M', u'𡚨'),
+ (0x2F861, 'M', u'𡛪'),
+ (0x2F862, 'M', u'姬'),
+ (0x2F863, 'M', u'娛'),
+ (0x2F864, 'M', u'娧'),
+ (0x2F865, 'M', u'姘'),
+ (0x2F866, 'M', u'婦'),
+ (0x2F867, 'M', u'㛮'),
(0x2F868, 'X'),
- (0x2F869, 'M', '嬈'),
- (0x2F86A, 'M', '嬾'),
- (0x2F86C, 'M', '𡧈'),
- (0x2F86D, 'M', '寃'),
- (0x2F86E, 'M', '寘'),
- (0x2F86F, 'M', '寧'),
- (0x2F870, 'M', '寳'),
- (0x2F871, 'M', '𡬘'),
- (0x2F872, 'M', '寿'),
- (0x2F873, 'M', '将'),
+ (0x2F869, 'M', u'嬈'),
+ (0x2F86A, 'M', u'嬾'),
+ (0x2F86C, 'M', u'𡧈'),
+ (0x2F86D, 'M', u'寃'),
+ (0x2F86E, 'M', u'寘'),
+ (0x2F86F, 'M', u'寧'),
+ (0x2F870, 'M', u'寳'),
+ (0x2F871, 'M', u'𡬘'),
+ (0x2F872, 'M', u'寿'),
+ (0x2F873, 'M', u'将'),
(0x2F874, 'X'),
- (0x2F875, 'M', '尢'),
- (0x2F876, 'M', '㞁'),
- (0x2F877, 'M', '屠'),
- (0x2F878, 'M', '屮'),
- (0x2F879, 'M', '峀'),
- (0x2F87A, 'M', '岍'),
- (0x2F87B, 'M', '𡷤'),
- (0x2F87C, 'M', '嵃'),
- (0x2F87D, 'M', '𡷦'),
- (0x2F87E, 'M', '嵮'),
- (0x2F87F, 'M', '嵫'),
- (0x2F880, 'M', '嵼'),
- (0x2F881, 'M', '巡'),
- (0x2F882, 'M', '巢'),
- (0x2F883, 'M', '㠯'),
- (0x2F884, 'M', '巽'),
- (0x2F885, 'M', '帨'),
- (0x2F886, 'M', '帽'),
- (0x2F887, 'M', '幩'),
- (0x2F888, 'M', '㡢'),
- (0x2F889, 'M', '𢆃'),
- (0x2F88A, 'M', '㡼'),
- (0x2F88B, 'M', '庰'),
- (0x2F88C, 'M', '庳'),
- (0x2F88D, 'M', '庶'),
- (0x2F88E, 'M', '廊'),
- (0x2F88F, 'M', '𪎒'),
- (0x2F890, 'M', '廾'),
- (0x2F891, 'M', '𢌱'),
+ (0x2F875, 'M', u'尢'),
+ (0x2F876, 'M', u'㞁'),
+ (0x2F877, 'M', u'屠'),
+ (0x2F878, 'M', u'屮'),
+ (0x2F879, 'M', u'峀'),
+ (0x2F87A, 'M', u'岍'),
+ (0x2F87B, 'M', u'𡷤'),
+ (0x2F87C, 'M', u'嵃'),
+ (0x2F87D, 'M', u'𡷦'),
+ (0x2F87E, 'M', u'嵮'),
+ (0x2F87F, 'M', u'嵫'),
+ (0x2F880, 'M', u'嵼'),
+ (0x2F881, 'M', u'巡'),
+ (0x2F882, 'M', u'巢'),
+ (0x2F883, 'M', u'㠯'),
+ (0x2F884, 'M', u'巽'),
+ (0x2F885, 'M', u'帨'),
+ (0x2F886, 'M', u'帽'),
+ (0x2F887, 'M', u'幩'),
+ (0x2F888, 'M', u'㡢'),
+ (0x2F889, 'M', u'𢆃'),
]
-def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_74():
return [
- (0x2F893, 'M', '舁'),
- (0x2F894, 'M', '弢'),
- (0x2F896, 'M', '㣇'),
- (0x2F897, 'M', '𣊸'),
- (0x2F898, 'M', '𦇚'),
- (0x2F899, 'M', '形'),
- (0x2F89A, 'M', '彫'),
- (0x2F89B, 'M', '㣣'),
- (0x2F89C, 'M', '徚'),
- (0x2F89D, 'M', '忍'),
- (0x2F89E, 'M', '志'),
- (0x2F89F, 'M', '忹'),
- (0x2F8A0, 'M', '悁'),
- (0x2F8A1, 'M', '㤺'),
- (0x2F8A2, 'M', '㤜'),
- (0x2F8A3, 'M', '悔'),
- (0x2F8A4, 'M', '𢛔'),
- (0x2F8A5, 'M', '惇'),
- (0x2F8A6, 'M', '慈'),
- (0x2F8A7, 'M', '慌'),
- (0x2F8A8, 'M', '慎'),
- (0x2F8A9, 'M', '慌'),
- (0x2F8AA, 'M', '慺'),
- (0x2F8AB, 'M', '憎'),
- (0x2F8AC, 'M', '憲'),
- (0x2F8AD, 'M', '憤'),
- (0x2F8AE, 'M', '憯'),
- (0x2F8AF, 'M', '懞'),
- (0x2F8B0, 'M', '懲'),
- (0x2F8B1, 'M', '懶'),
- (0x2F8B2, 'M', '成'),
- (0x2F8B3, 'M', '戛'),
- (0x2F8B4, 'M', '扝'),
- (0x2F8B5, 'M', '抱'),
- (0x2F8B6, 'M', '拔'),
- (0x2F8B7, 'M', '捐'),
- (0x2F8B8, 'M', '𢬌'),
- (0x2F8B9, 'M', '挽'),
- (0x2F8BA, 'M', '拼'),
- (0x2F8BB, 'M', '捨'),
- (0x2F8BC, 'M', '掃'),
- (0x2F8BD, 'M', '揤'),
- (0x2F8BE, 'M', '𢯱'),
- (0x2F8BF, 'M', '搢'),
- (0x2F8C0, 'M', '揅'),
- (0x2F8C1, 'M', '掩'),
- (0x2F8C2, 'M', '㨮'),
- (0x2F8C3, 'M', '摩'),
- (0x2F8C4, 'M', '摾'),
- (0x2F8C5, 'M', '撝'),
- (0x2F8C6, 'M', '摷'),
- (0x2F8C7, 'M', '㩬'),
- (0x2F8C8, 'M', '敏'),
- (0x2F8C9, 'M', '敬'),
- (0x2F8CA, 'M', '𣀊'),
- (0x2F8CB, 'M', '旣'),
- (0x2F8CC, 'M', '書'),
- (0x2F8CD, 'M', '晉'),
- (0x2F8CE, 'M', '㬙'),
- (0x2F8CF, 'M', '暑'),
- (0x2F8D0, 'M', '㬈'),
- (0x2F8D1, 'M', '㫤'),
- (0x2F8D2, 'M', '冒'),
- (0x2F8D3, 'M', '冕'),
- (0x2F8D4, 'M', '最'),
- (0x2F8D5, 'M', '暜'),
- (0x2F8D6, 'M', '肭'),
- (0x2F8D7, 'M', '䏙'),
- (0x2F8D8, 'M', '朗'),
- (0x2F8D9, 'M', '望'),
- (0x2F8DA, 'M', '朡'),
- (0x2F8DB, 'M', '杞'),
- (0x2F8DC, 'M', '杓'),
- (0x2F8DD, 'M', '𣏃'),
- (0x2F8DE, 'M', '㭉'),
- (0x2F8DF, 'M', '柺'),
- (0x2F8E0, 'M', '枅'),
- (0x2F8E1, 'M', '桒'),
- (0x2F8E2, 'M', '梅'),
- (0x2F8E3, 'M', '𣑭'),
- (0x2F8E4, 'M', '梎'),
- (0x2F8E5, 'M', '栟'),
- (0x2F8E6, 'M', '椔'),
- (0x2F8E7, 'M', '㮝'),
- (0x2F8E8, 'M', '楂'),
- (0x2F8E9, 'M', '榣'),
- (0x2F8EA, 'M', '槪'),
- (0x2F8EB, 'M', '檨'),
- (0x2F8EC, 'M', '𣚣'),
- (0x2F8ED, 'M', '櫛'),
- (0x2F8EE, 'M', '㰘'),
- (0x2F8EF, 'M', '次'),
- (0x2F8F0, 'M', '𣢧'),
- (0x2F8F1, 'M', '歔'),
- (0x2F8F2, 'M', '㱎'),
- (0x2F8F3, 'M', '歲'),
- (0x2F8F4, 'M', '殟'),
- (0x2F8F5, 'M', '殺'),
- (0x2F8F6, 'M', '殻'),
- (0x2F8F7, 'M', '𣪍'),
+ (0x2F88A, 'M', u'㡼'),
+ (0x2F88B, 'M', u'庰'),
+ (0x2F88C, 'M', u'庳'),
+ (0x2F88D, 'M', u'庶'),
+ (0x2F88E, 'M', u'廊'),
+ (0x2F88F, 'M', u'𪎒'),
+ (0x2F890, 'M', u'廾'),
+ (0x2F891, 'M', u'𢌱'),
+ (0x2F893, 'M', u'舁'),
+ (0x2F894, 'M', u'弢'),
+ (0x2F896, 'M', u'㣇'),
+ (0x2F897, 'M', u'𣊸'),
+ (0x2F898, 'M', u'𦇚'),
+ (0x2F899, 'M', u'形'),
+ (0x2F89A, 'M', u'彫'),
+ (0x2F89B, 'M', u'㣣'),
+ (0x2F89C, 'M', u'徚'),
+ (0x2F89D, 'M', u'忍'),
+ (0x2F89E, 'M', u'志'),
+ (0x2F89F, 'M', u'忹'),
+ (0x2F8A0, 'M', u'悁'),
+ (0x2F8A1, 'M', u'㤺'),
+ (0x2F8A2, 'M', u'㤜'),
+ (0x2F8A3, 'M', u'悔'),
+ (0x2F8A4, 'M', u'𢛔'),
+ (0x2F8A5, 'M', u'惇'),
+ (0x2F8A6, 'M', u'慈'),
+ (0x2F8A7, 'M', u'慌'),
+ (0x2F8A8, 'M', u'慎'),
+ (0x2F8A9, 'M', u'慌'),
+ (0x2F8AA, 'M', u'慺'),
+ (0x2F8AB, 'M', u'憎'),
+ (0x2F8AC, 'M', u'憲'),
+ (0x2F8AD, 'M', u'憤'),
+ (0x2F8AE, 'M', u'憯'),
+ (0x2F8AF, 'M', u'懞'),
+ (0x2F8B0, 'M', u'懲'),
+ (0x2F8B1, 'M', u'懶'),
+ (0x2F8B2, 'M', u'成'),
+ (0x2F8B3, 'M', u'戛'),
+ (0x2F8B4, 'M', u'扝'),
+ (0x2F8B5, 'M', u'抱'),
+ (0x2F8B6, 'M', u'拔'),
+ (0x2F8B7, 'M', u'捐'),
+ (0x2F8B8, 'M', u'𢬌'),
+ (0x2F8B9, 'M', u'挽'),
+ (0x2F8BA, 'M', u'拼'),
+ (0x2F8BB, 'M', u'捨'),
+ (0x2F8BC, 'M', u'掃'),
+ (0x2F8BD, 'M', u'揤'),
+ (0x2F8BE, 'M', u'𢯱'),
+ (0x2F8BF, 'M', u'搢'),
+ (0x2F8C0, 'M', u'揅'),
+ (0x2F8C1, 'M', u'掩'),
+ (0x2F8C2, 'M', u'㨮'),
+ (0x2F8C3, 'M', u'摩'),
+ (0x2F8C4, 'M', u'摾'),
+ (0x2F8C5, 'M', u'撝'),
+ (0x2F8C6, 'M', u'摷'),
+ (0x2F8C7, 'M', u'㩬'),
+ (0x2F8C8, 'M', u'敏'),
+ (0x2F8C9, 'M', u'敬'),
+ (0x2F8CA, 'M', u'𣀊'),
+ (0x2F8CB, 'M', u'旣'),
+ (0x2F8CC, 'M', u'書'),
+ (0x2F8CD, 'M', u'晉'),
+ (0x2F8CE, 'M', u'㬙'),
+ (0x2F8CF, 'M', u'暑'),
+ (0x2F8D0, 'M', u'㬈'),
+ (0x2F8D1, 'M', u'㫤'),
+ (0x2F8D2, 'M', u'冒'),
+ (0x2F8D3, 'M', u'冕'),
+ (0x2F8D4, 'M', u'最'),
+ (0x2F8D5, 'M', u'暜'),
+ (0x2F8D6, 'M', u'肭'),
+ (0x2F8D7, 'M', u'䏙'),
+ (0x2F8D8, 'M', u'朗'),
+ (0x2F8D9, 'M', u'望'),
+ (0x2F8DA, 'M', u'朡'),
+ (0x2F8DB, 'M', u'杞'),
+ (0x2F8DC, 'M', u'杓'),
+ (0x2F8DD, 'M', u'𣏃'),
+ (0x2F8DE, 'M', u'㭉'),
+ (0x2F8DF, 'M', u'柺'),
+ (0x2F8E0, 'M', u'枅'),
+ (0x2F8E1, 'M', u'桒'),
+ (0x2F8E2, 'M', u'梅'),
+ (0x2F8E3, 'M', u'𣑭'),
+ (0x2F8E4, 'M', u'梎'),
+ (0x2F8E5, 'M', u'栟'),
+ (0x2F8E6, 'M', u'椔'),
+ (0x2F8E7, 'M', u'㮝'),
+ (0x2F8E8, 'M', u'楂'),
+ (0x2F8E9, 'M', u'榣'),
+ (0x2F8EA, 'M', u'槪'),
+ (0x2F8EB, 'M', u'檨'),
+ (0x2F8EC, 'M', u'𣚣'),
+ (0x2F8ED, 'M', u'櫛'),
+ (0x2F8EE, 'M', u'㰘'),
+ (0x2F8EF, 'M', u'次'),
]
-def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_75():
return [
- (0x2F8F8, 'M', '𡴋'),
- (0x2F8F9, 'M', '𣫺'),
- (0x2F8FA, 'M', '汎'),
- (0x2F8FB, 'M', '𣲼'),
- (0x2F8FC, 'M', '沿'),
- (0x2F8FD, 'M', '泍'),
- (0x2F8FE, 'M', '汧'),
- (0x2F8FF, 'M', '洖'),
- (0x2F900, 'M', '派'),
- (0x2F901, 'M', '海'),
- (0x2F902, 'M', '流'),
- (0x2F903, 'M', '浩'),
- (0x2F904, 'M', '浸'),
- (0x2F905, 'M', '涅'),
- (0x2F906, 'M', '𣴞'),
- (0x2F907, 'M', '洴'),
- (0x2F908, 'M', '港'),
- (0x2F909, 'M', '湮'),
- (0x2F90A, 'M', '㴳'),
- (0x2F90B, 'M', '滋'),
- (0x2F90C, 'M', '滇'),
- (0x2F90D, 'M', '𣻑'),
- (0x2F90E, 'M', '淹'),
- (0x2F90F, 'M', '潮'),
- (0x2F910, 'M', '𣽞'),
- (0x2F911, 'M', '𣾎'),
- (0x2F912, 'M', '濆'),
- (0x2F913, 'M', '瀹'),
- (0x2F914, 'M', '瀞'),
- (0x2F915, 'M', '瀛'),
- (0x2F916, 'M', '㶖'),
- (0x2F917, 'M', '灊'),
- (0x2F918, 'M', '災'),
- (0x2F919, 'M', '灷'),
- (0x2F91A, 'M', '炭'),
- (0x2F91B, 'M', '𠔥'),
- (0x2F91C, 'M', '煅'),
- (0x2F91D, 'M', '𤉣'),
- (0x2F91E, 'M', '熜'),
+ (0x2F8F0, 'M', u'𣢧'),
+ (0x2F8F1, 'M', u'歔'),
+ (0x2F8F2, 'M', u'㱎'),
+ (0x2F8F3, 'M', u'歲'),
+ (0x2F8F4, 'M', u'殟'),
+ (0x2F8F5, 'M', u'殺'),
+ (0x2F8F6, 'M', u'殻'),
+ (0x2F8F7, 'M', u'𣪍'),
+ (0x2F8F8, 'M', u'𡴋'),
+ (0x2F8F9, 'M', u'𣫺'),
+ (0x2F8FA, 'M', u'汎'),
+ (0x2F8FB, 'M', u'𣲼'),
+ (0x2F8FC, 'M', u'沿'),
+ (0x2F8FD, 'M', u'泍'),
+ (0x2F8FE, 'M', u'汧'),
+ (0x2F8FF, 'M', u'洖'),
+ (0x2F900, 'M', u'派'),
+ (0x2F901, 'M', u'海'),
+ (0x2F902, 'M', u'流'),
+ (0x2F903, 'M', u'浩'),
+ (0x2F904, 'M', u'浸'),
+ (0x2F905, 'M', u'涅'),
+ (0x2F906, 'M', u'𣴞'),
+ (0x2F907, 'M', u'洴'),
+ (0x2F908, 'M', u'港'),
+ (0x2F909, 'M', u'湮'),
+ (0x2F90A, 'M', u'㴳'),
+ (0x2F90B, 'M', u'滋'),
+ (0x2F90C, 'M', u'滇'),
+ (0x2F90D, 'M', u'𣻑'),
+ (0x2F90E, 'M', u'淹'),
+ (0x2F90F, 'M', u'潮'),
+ (0x2F910, 'M', u'𣽞'),
+ (0x2F911, 'M', u'𣾎'),
+ (0x2F912, 'M', u'濆'),
+ (0x2F913, 'M', u'瀹'),
+ (0x2F914, 'M', u'瀞'),
+ (0x2F915, 'M', u'瀛'),
+ (0x2F916, 'M', u'㶖'),
+ (0x2F917, 'M', u'灊'),
+ (0x2F918, 'M', u'災'),
+ (0x2F919, 'M', u'灷'),
+ (0x2F91A, 'M', u'炭'),
+ (0x2F91B, 'M', u'𠔥'),
+ (0x2F91C, 'M', u'煅'),
+ (0x2F91D, 'M', u'𤉣'),
+ (0x2F91E, 'M', u'熜'),
(0x2F91F, 'X'),
- (0x2F920, 'M', '爨'),
- (0x2F921, 'M', '爵'),
- (0x2F922, 'M', '牐'),
- (0x2F923, 'M', '𤘈'),
- (0x2F924, 'M', '犀'),
- (0x2F925, 'M', '犕'),
- (0x2F926, 'M', '𤜵'),
- (0x2F927, 'M', '𤠔'),
- (0x2F928, 'M', '獺'),
- (0x2F929, 'M', '王'),
- (0x2F92A, 'M', '㺬'),
- (0x2F92B, 'M', '玥'),
- (0x2F92C, 'M', '㺸'),
- (0x2F92E, 'M', '瑇'),
- (0x2F92F, 'M', '瑜'),
- (0x2F930, 'M', '瑱'),
- (0x2F931, 'M', '璅'),
- (0x2F932, 'M', '瓊'),
- (0x2F933, 'M', '㼛'),
- (0x2F934, 'M', '甤'),
- (0x2F935, 'M', '𤰶'),
- (0x2F936, 'M', '甾'),
- (0x2F937, 'M', '𤲒'),
- (0x2F938, 'M', '異'),
- (0x2F939, 'M', '𢆟'),
- (0x2F93A, 'M', '瘐'),
- (0x2F93B, 'M', '𤾡'),
- (0x2F93C, 'M', '𤾸'),
- (0x2F93D, 'M', '𥁄'),
- (0x2F93E, 'M', '㿼'),
- (0x2F93F, 'M', '䀈'),
- (0x2F940, 'M', '直'),
- (0x2F941, 'M', '𥃳'),
- (0x2F942, 'M', '𥃲'),
- (0x2F943, 'M', '𥄙'),
- (0x2F944, 'M', '𥄳'),
- (0x2F945, 'M', '眞'),
- (0x2F946, 'M', '真'),
- (0x2F948, 'M', '睊'),
- (0x2F949, 'M', '䀹'),
- (0x2F94A, 'M', '瞋'),
- (0x2F94B, 'M', '䁆'),
- (0x2F94C, 'M', '䂖'),
- (0x2F94D, 'M', '𥐝'),
- (0x2F94E, 'M', '硎'),
- (0x2F94F, 'M', '碌'),
- (0x2F950, 'M', '磌'),
- (0x2F951, 'M', '䃣'),
- (0x2F952, 'M', '𥘦'),
- (0x2F953, 'M', '祖'),
- (0x2F954, 'M', '𥚚'),
- (0x2F955, 'M', '𥛅'),
- (0x2F956, 'M', '福'),
- (0x2F957, 'M', '秫'),
- (0x2F958, 'M', '䄯'),
- (0x2F959, 'M', '穀'),
- (0x2F95A, 'M', '穊'),
- (0x2F95B, 'M', '穏'),
- (0x2F95C, 'M', '𥥼'),
- (0x2F95D, 'M', '𥪧'),
+ (0x2F920, 'M', u'爨'),
+ (0x2F921, 'M', u'爵'),
+ (0x2F922, 'M', u'牐'),
+ (0x2F923, 'M', u'𤘈'),
+ (0x2F924, 'M', u'犀'),
+ (0x2F925, 'M', u'犕'),
+ (0x2F926, 'M', u'𤜵'),
+ (0x2F927, 'M', u'𤠔'),
+ (0x2F928, 'M', u'獺'),
+ (0x2F929, 'M', u'王'),
+ (0x2F92A, 'M', u'㺬'),
+ (0x2F92B, 'M', u'玥'),
+ (0x2F92C, 'M', u'㺸'),
+ (0x2F92E, 'M', u'瑇'),
+ (0x2F92F, 'M', u'瑜'),
+ (0x2F930, 'M', u'瑱'),
+ (0x2F931, 'M', u'璅'),
+ (0x2F932, 'M', u'瓊'),
+ (0x2F933, 'M', u'㼛'),
+ (0x2F934, 'M', u'甤'),
+ (0x2F935, 'M', u'𤰶'),
+ (0x2F936, 'M', u'甾'),
+ (0x2F937, 'M', u'𤲒'),
+ (0x2F938, 'M', u'異'),
+ (0x2F939, 'M', u'𢆟'),
+ (0x2F93A, 'M', u'瘐'),
+ (0x2F93B, 'M', u'𤾡'),
+ (0x2F93C, 'M', u'𤾸'),
+ (0x2F93D, 'M', u'𥁄'),
+ (0x2F93E, 'M', u'㿼'),
+ (0x2F93F, 'M', u'䀈'),
+ (0x2F940, 'M', u'直'),
+ (0x2F941, 'M', u'𥃳'),
+ (0x2F942, 'M', u'𥃲'),
+ (0x2F943, 'M', u'𥄙'),
+ (0x2F944, 'M', u'𥄳'),
+ (0x2F945, 'M', u'眞'),
+ (0x2F946, 'M', u'真'),
+ (0x2F948, 'M', u'睊'),
+ (0x2F949, 'M', u'䀹'),
+ (0x2F94A, 'M', u'瞋'),
+ (0x2F94B, 'M', u'䁆'),
+ (0x2F94C, 'M', u'䂖'),
+ (0x2F94D, 'M', u'𥐝'),
+ (0x2F94E, 'M', u'硎'),
+ (0x2F94F, 'M', u'碌'),
+ (0x2F950, 'M', u'磌'),
+ (0x2F951, 'M', u'䃣'),
+ (0x2F952, 'M', u'𥘦'),
+ (0x2F953, 'M', u'祖'),
+ (0x2F954, 'M', u'𥚚'),
+ (0x2F955, 'M', u'𥛅'),
]
-def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_76():
return [
+ (0x2F956, 'M', u'福'),
+ (0x2F957, 'M', u'秫'),
+ (0x2F958, 'M', u'䄯'),
+ (0x2F959, 'M', u'穀'),
+ (0x2F95A, 'M', u'穊'),
+ (0x2F95B, 'M', u'穏'),
+ (0x2F95C, 'M', u'𥥼'),
+ (0x2F95D, 'M', u'𥪧'),
(0x2F95F, 'X'),
- (0x2F960, 'M', '䈂'),
- (0x2F961, 'M', '𥮫'),
- (0x2F962, 'M', '篆'),
- (0x2F963, 'M', '築'),
- (0x2F964, 'M', '䈧'),
- (0x2F965, 'M', '𥲀'),
- (0x2F966, 'M', '糒'),
- (0x2F967, 'M', '䊠'),
- (0x2F968, 'M', '糨'),
- (0x2F969, 'M', '糣'),
- (0x2F96A, 'M', '紀'),
- (0x2F96B, 'M', '𥾆'),
- (0x2F96C, 'M', '絣'),
- (0x2F96D, 'M', '䌁'),
- (0x2F96E, 'M', '緇'),
- (0x2F96F, 'M', '縂'),
- (0x2F970, 'M', '繅'),
- (0x2F971, 'M', '䌴'),
- (0x2F972, 'M', '𦈨'),
- (0x2F973, 'M', '𦉇'),
- (0x2F974, 'M', '䍙'),
- (0x2F975, 'M', '𦋙'),
- (0x2F976, 'M', '罺'),
- (0x2F977, 'M', '𦌾'),
- (0x2F978, 'M', '羕'),
- (0x2F979, 'M', '翺'),
- (0x2F97A, 'M', '者'),
- (0x2F97B, 'M', '𦓚'),
- (0x2F97C, 'M', '𦔣'),
- (0x2F97D, 'M', '聠'),
- (0x2F97E, 'M', '𦖨'),
- (0x2F97F, 'M', '聰'),
- (0x2F980, 'M', '𣍟'),
- (0x2F981, 'M', '䏕'),
- (0x2F982, 'M', '育'),
- (0x2F983, 'M', '脃'),
- (0x2F984, 'M', '䐋'),
- (0x2F985, 'M', '脾'),
- (0x2F986, 'M', '媵'),
- (0x2F987, 'M', '𦞧'),
- (0x2F988, 'M', '𦞵'),
- (0x2F989, 'M', '𣎓'),
- (0x2F98A, 'M', '𣎜'),
- (0x2F98B, 'M', '舁'),
- (0x2F98C, 'M', '舄'),
- (0x2F98D, 'M', '辞'),
- (0x2F98E, 'M', '䑫'),
- (0x2F98F, 'M', '芑'),
- (0x2F990, 'M', '芋'),
- (0x2F991, 'M', '芝'),
- (0x2F992, 'M', '劳'),
- (0x2F993, 'M', '花'),
- (0x2F994, 'M', '芳'),
- (0x2F995, 'M', '芽'),
- (0x2F996, 'M', '苦'),
- (0x2F997, 'M', '𦬼'),
- (0x2F998, 'M', '若'),
- (0x2F999, 'M', '茝'),
- (0x2F99A, 'M', '荣'),
- (0x2F99B, 'M', '莭'),
- (0x2F99C, 'M', '茣'),
- (0x2F99D, 'M', '莽'),
- (0x2F99E, 'M', '菧'),
- (0x2F99F, 'M', '著'),
- (0x2F9A0, 'M', '荓'),
- (0x2F9A1, 'M', '菊'),
- (0x2F9A2, 'M', '菌'),
- (0x2F9A3, 'M', '菜'),
- (0x2F9A4, 'M', '𦰶'),
- (0x2F9A5, 'M', '𦵫'),
- (0x2F9A6, 'M', '𦳕'),
- (0x2F9A7, 'M', '䔫'),
- (0x2F9A8, 'M', '蓱'),
- (0x2F9A9, 'M', '蓳'),
- (0x2F9AA, 'M', '蔖'),
- (0x2F9AB, 'M', '𧏊'),
- (0x2F9AC, 'M', '蕤'),
- (0x2F9AD, 'M', '𦼬'),
- (0x2F9AE, 'M', '䕝'),
- (0x2F9AF, 'M', '䕡'),
- (0x2F9B0, 'M', '𦾱'),
- (0x2F9B1, 'M', '𧃒'),
- (0x2F9B2, 'M', '䕫'),
- (0x2F9B3, 'M', '虐'),
- (0x2F9B4, 'M', '虜'),
- (0x2F9B5, 'M', '虧'),
- (0x2F9B6, 'M', '虩'),
- (0x2F9B7, 'M', '蚩'),
- (0x2F9B8, 'M', '蚈'),
- (0x2F9B9, 'M', '蜎'),
- (0x2F9BA, 'M', '蛢'),
- (0x2F9BB, 'M', '蝹'),
- (0x2F9BC, 'M', '蜨'),
- (0x2F9BD, 'M', '蝫'),
- (0x2F9BE, 'M', '螆'),
- (0x2F9BF, 'X'),
- (0x2F9C0, 'M', '蟡'),
- (0x2F9C1, 'M', '蠁'),
- (0x2F9C2, 'M', '䗹'),
+ (0x2F960, 'M', u'䈂'),
+ (0x2F961, 'M', u'𥮫'),
+ (0x2F962, 'M', u'篆'),
+ (0x2F963, 'M', u'築'),
+ (0x2F964, 'M', u'䈧'),
+ (0x2F965, 'M', u'𥲀'),
+ (0x2F966, 'M', u'糒'),
+ (0x2F967, 'M', u'䊠'),
+ (0x2F968, 'M', u'糨'),
+ (0x2F969, 'M', u'糣'),
+ (0x2F96A, 'M', u'紀'),
+ (0x2F96B, 'M', u'𥾆'),
+ (0x2F96C, 'M', u'絣'),
+ (0x2F96D, 'M', u'䌁'),
+ (0x2F96E, 'M', u'緇'),
+ (0x2F96F, 'M', u'縂'),
+ (0x2F970, 'M', u'繅'),
+ (0x2F971, 'M', u'䌴'),
+ (0x2F972, 'M', u'𦈨'),
+ (0x2F973, 'M', u'𦉇'),
+ (0x2F974, 'M', u'䍙'),
+ (0x2F975, 'M', u'𦋙'),
+ (0x2F976, 'M', u'罺'),
+ (0x2F977, 'M', u'𦌾'),
+ (0x2F978, 'M', u'羕'),
+ (0x2F979, 'M', u'翺'),
+ (0x2F97A, 'M', u'者'),
+ (0x2F97B, 'M', u'𦓚'),
+ (0x2F97C, 'M', u'𦔣'),
+ (0x2F97D, 'M', u'聠'),
+ (0x2F97E, 'M', u'𦖨'),
+ (0x2F97F, 'M', u'聰'),
+ (0x2F980, 'M', u'𣍟'),
+ (0x2F981, 'M', u'䏕'),
+ (0x2F982, 'M', u'育'),
+ (0x2F983, 'M', u'脃'),
+ (0x2F984, 'M', u'䐋'),
+ (0x2F985, 'M', u'脾'),
+ (0x2F986, 'M', u'媵'),
+ (0x2F987, 'M', u'𦞧'),
+ (0x2F988, 'M', u'𦞵'),
+ (0x2F989, 'M', u'𣎓'),
+ (0x2F98A, 'M', u'𣎜'),
+ (0x2F98B, 'M', u'舁'),
+ (0x2F98C, 'M', u'舄'),
+ (0x2F98D, 'M', u'辞'),
+ (0x2F98E, 'M', u'䑫'),
+ (0x2F98F, 'M', u'芑'),
+ (0x2F990, 'M', u'芋'),
+ (0x2F991, 'M', u'芝'),
+ (0x2F992, 'M', u'劳'),
+ (0x2F993, 'M', u'花'),
+ (0x2F994, 'M', u'芳'),
+ (0x2F995, 'M', u'芽'),
+ (0x2F996, 'M', u'苦'),
+ (0x2F997, 'M', u'𦬼'),
+ (0x2F998, 'M', u'若'),
+ (0x2F999, 'M', u'茝'),
+ (0x2F99A, 'M', u'荣'),
+ (0x2F99B, 'M', u'莭'),
+ (0x2F99C, 'M', u'茣'),
+ (0x2F99D, 'M', u'莽'),
+ (0x2F99E, 'M', u'菧'),
+ (0x2F99F, 'M', u'著'),
+ (0x2F9A0, 'M', u'荓'),
+ (0x2F9A1, 'M', u'菊'),
+ (0x2F9A2, 'M', u'菌'),
+ (0x2F9A3, 'M', u'菜'),
+ (0x2F9A4, 'M', u'𦰶'),
+ (0x2F9A5, 'M', u'𦵫'),
+ (0x2F9A6, 'M', u'𦳕'),
+ (0x2F9A7, 'M', u'䔫'),
+ (0x2F9A8, 'M', u'蓱'),
+ (0x2F9A9, 'M', u'蓳'),
+ (0x2F9AA, 'M', u'蔖'),
+ (0x2F9AB, 'M', u'𧏊'),
+ (0x2F9AC, 'M', u'蕤'),
+ (0x2F9AD, 'M', u'𦼬'),
+ (0x2F9AE, 'M', u'䕝'),
+ (0x2F9AF, 'M', u'䕡'),
+ (0x2F9B0, 'M', u'𦾱'),
+ (0x2F9B1, 'M', u'𧃒'),
+ (0x2F9B2, 'M', u'䕫'),
+ (0x2F9B3, 'M', u'虐'),
+ (0x2F9B4, 'M', u'虜'),
+ (0x2F9B5, 'M', u'虧'),
+ (0x2F9B6, 'M', u'虩'),
+ (0x2F9B7, 'M', u'蚩'),
+ (0x2F9B8, 'M', u'蚈'),
+ (0x2F9B9, 'M', u'蜎'),
+ (0x2F9BA, 'M', u'蛢'),
]
-def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+def _seg_77():
return [
- (0x2F9C3, 'M', '衠'),
- (0x2F9C4, 'M', '衣'),
- (0x2F9C5, 'M', '𧙧'),
- (0x2F9C6, 'M', '裗'),
- (0x2F9C7, 'M', '裞'),
- (0x2F9C8, 'M', '䘵'),
- (0x2F9C9, 'M', '裺'),
- (0x2F9CA, 'M', '㒻'),
- (0x2F9CB, 'M', '𧢮'),
- (0x2F9CC, 'M', '𧥦'),
- (0x2F9CD, 'M', '䚾'),
- (0x2F9CE, 'M', '䛇'),
- (0x2F9CF, 'M', '誠'),
- (0x2F9D0, 'M', '諭'),
- (0x2F9D1, 'M', '變'),
- (0x2F9D2, 'M', '豕'),
- (0x2F9D3, 'M', '𧲨'),
- (0x2F9D4, 'M', '貫'),
- (0x2F9D5, 'M', '賁'),
- (0x2F9D6, 'M', '贛'),
- (0x2F9D7, 'M', '起'),
- (0x2F9D8, 'M', '𧼯'),
- (0x2F9D9, 'M', '𠠄'),
- (0x2F9DA, 'M', '跋'),
- (0x2F9DB, 'M', '趼'),
- (0x2F9DC, 'M', '跰'),
- (0x2F9DD, 'M', '𠣞'),
- (0x2F9DE, 'M', '軔'),
- (0x2F9DF, 'M', '輸'),
- (0x2F9E0, 'M', '𨗒'),
- (0x2F9E1, 'M', '𨗭'),
- (0x2F9E2, 'M', '邔'),
- (0x2F9E3, 'M', '郱'),
- (0x2F9E4, 'M', '鄑'),
- (0x2F9E5, 'M', '𨜮'),
- (0x2F9E6, 'M', '鄛'),
- (0x2F9E7, 'M', '鈸'),
- (0x2F9E8, 'M', '鋗'),
- (0x2F9E9, 'M', '鋘'),
- (0x2F9EA, 'M', '鉼'),
- (0x2F9EB, 'M', '鏹'),
- (0x2F9EC, 'M', '鐕'),
- (0x2F9ED, 'M', '𨯺'),
- (0x2F9EE, 'M', '開'),
- (0x2F9EF, 'M', '䦕'),
- (0x2F9F0, 'M', '閷'),
- (0x2F9F1, 'M', '𨵷'),
- (0x2F9F2, 'M', '䧦'),
- (0x2F9F3, 'M', '雃'),
- (0x2F9F4, 'M', '嶲'),
- (0x2F9F5, 'M', '霣'),
- (0x2F9F6, 'M', '𩅅'),
- (0x2F9F7, 'M', '𩈚'),
- (0x2F9F8, 'M', '䩮'),
- (0x2F9F9, 'M', '䩶'),
- (0x2F9FA, 'M', '韠'),
- (0x2F9FB, 'M', '𩐊'),
- (0x2F9FC, 'M', '䪲'),
- (0x2F9FD, 'M', '𩒖'),
- (0x2F9FE, 'M', '頋'),
- (0x2FA00, 'M', '頩'),
- (0x2FA01, 'M', '𩖶'),
- (0x2FA02, 'M', '飢'),
- (0x2FA03, 'M', '䬳'),
- (0x2FA04, 'M', '餩'),
- (0x2FA05, 'M', '馧'),
- (0x2FA06, 'M', '駂'),
- (0x2FA07, 'M', '駾'),
- (0x2FA08, 'M', '䯎'),
- (0x2FA09, 'M', '𩬰'),
- (0x2FA0A, 'M', '鬒'),
- (0x2FA0B, 'M', '鱀'),
- (0x2FA0C, 'M', '鳽'),
- (0x2FA0D, 'M', '䳎'),
- (0x2FA0E, 'M', '䳭'),
- (0x2FA0F, 'M', '鵧'),
- (0x2FA10, 'M', '𪃎'),
- (0x2FA11, 'M', '䳸'),
- (0x2FA12, 'M', '𪄅'),
- (0x2FA13, 'M', '𪈎'),
- (0x2FA14, 'M', '𪊑'),
- (0x2FA15, 'M', '麻'),
- (0x2FA16, 'M', '䵖'),
- (0x2FA17, 'M', '黹'),
- (0x2FA18, 'M', '黾'),
- (0x2FA19, 'M', '鼅'),
- (0x2FA1A, 'M', '鼏'),
- (0x2FA1B, 'M', '鼖'),
- (0x2FA1C, 'M', '鼻'),
- (0x2FA1D, 'M', '𪘀'),
+ (0x2F9BB, 'M', u'蝹'),
+ (0x2F9BC, 'M', u'蜨'),
+ (0x2F9BD, 'M', u'蝫'),
+ (0x2F9BE, 'M', u'螆'),
+ (0x2F9BF, 'X'),
+ (0x2F9C0, 'M', u'蟡'),
+ (0x2F9C1, 'M', u'蠁'),
+ (0x2F9C2, 'M', u'䗹'),
+ (0x2F9C3, 'M', u'衠'),
+ (0x2F9C4, 'M', u'衣'),
+ (0x2F9C5, 'M', u'𧙧'),
+ (0x2F9C6, 'M', u'裗'),
+ (0x2F9C7, 'M', u'裞'),
+ (0x2F9C8, 'M', u'䘵'),
+ (0x2F9C9, 'M', u'裺'),
+ (0x2F9CA, 'M', u'㒻'),
+ (0x2F9CB, 'M', u'𧢮'),
+ (0x2F9CC, 'M', u'𧥦'),
+ (0x2F9CD, 'M', u'䚾'),
+ (0x2F9CE, 'M', u'䛇'),
+ (0x2F9CF, 'M', u'誠'),
+ (0x2F9D0, 'M', u'諭'),
+ (0x2F9D1, 'M', u'變'),
+ (0x2F9D2, 'M', u'豕'),
+ (0x2F9D3, 'M', u'𧲨'),
+ (0x2F9D4, 'M', u'貫'),
+ (0x2F9D5, 'M', u'賁'),
+ (0x2F9D6, 'M', u'贛'),
+ (0x2F9D7, 'M', u'起'),
+ (0x2F9D8, 'M', u'𧼯'),
+ (0x2F9D9, 'M', u'𠠄'),
+ (0x2F9DA, 'M', u'跋'),
+ (0x2F9DB, 'M', u'趼'),
+ (0x2F9DC, 'M', u'跰'),
+ (0x2F9DD, 'M', u'𠣞'),
+ (0x2F9DE, 'M', u'軔'),
+ (0x2F9DF, 'M', u'輸'),
+ (0x2F9E0, 'M', u'𨗒'),
+ (0x2F9E1, 'M', u'𨗭'),
+ (0x2F9E2, 'M', u'邔'),
+ (0x2F9E3, 'M', u'郱'),
+ (0x2F9E4, 'M', u'鄑'),
+ (0x2F9E5, 'M', u'𨜮'),
+ (0x2F9E6, 'M', u'鄛'),
+ (0x2F9E7, 'M', u'鈸'),
+ (0x2F9E8, 'M', u'鋗'),
+ (0x2F9E9, 'M', u'鋘'),
+ (0x2F9EA, 'M', u'鉼'),
+ (0x2F9EB, 'M', u'鏹'),
+ (0x2F9EC, 'M', u'鐕'),
+ (0x2F9ED, 'M', u'𨯺'),
+ (0x2F9EE, 'M', u'開'),
+ (0x2F9EF, 'M', u'䦕'),
+ (0x2F9F0, 'M', u'閷'),
+ (0x2F9F1, 'M', u'𨵷'),
+ (0x2F9F2, 'M', u'䧦'),
+ (0x2F9F3, 'M', u'雃'),
+ (0x2F9F4, 'M', u'嶲'),
+ (0x2F9F5, 'M', u'霣'),
+ (0x2F9F6, 'M', u'𩅅'),
+ (0x2F9F7, 'M', u'𩈚'),
+ (0x2F9F8, 'M', u'䩮'),
+ (0x2F9F9, 'M', u'䩶'),
+ (0x2F9FA, 'M', u'韠'),
+ (0x2F9FB, 'M', u'𩐊'),
+ (0x2F9FC, 'M', u'䪲'),
+ (0x2F9FD, 'M', u'𩒖'),
+ (0x2F9FE, 'M', u'頋'),
+ (0x2FA00, 'M', u'頩'),
+ (0x2FA01, 'M', u'𩖶'),
+ (0x2FA02, 'M', u'飢'),
+ (0x2FA03, 'M', u'䬳'),
+ (0x2FA04, 'M', u'餩'),
+ (0x2FA05, 'M', u'馧'),
+ (0x2FA06, 'M', u'駂'),
+ (0x2FA07, 'M', u'駾'),
+ (0x2FA08, 'M', u'䯎'),
+ (0x2FA09, 'M', u'𩬰'),
+ (0x2FA0A, 'M', u'鬒'),
+ (0x2FA0B, 'M', u'鱀'),
+ (0x2FA0C, 'M', u'鳽'),
+ (0x2FA0D, 'M', u'䳎'),
+ (0x2FA0E, 'M', u'䳭'),
+ (0x2FA0F, 'M', u'鵧'),
+ (0x2FA10, 'M', u'𪃎'),
+ (0x2FA11, 'M', u'䳸'),
+ (0x2FA12, 'M', u'𪄅'),
+ (0x2FA13, 'M', u'𪈎'),
+ (0x2FA14, 'M', u'𪊑'),
+ (0x2FA15, 'M', u'麻'),
+ (0x2FA16, 'M', u'䵖'),
+ (0x2FA17, 'M', u'黹'),
+ (0x2FA18, 'M', u'黾'),
+ (0x2FA19, 'M', u'鼅'),
+ (0x2FA1A, 'M', u'鼏'),
+ (0x2FA1B, 'M', u'鼖'),
+ (0x2FA1C, 'M', u'鼻'),
+ (0x2FA1D, 'M', u'𪘀'),
(0x2FA1E, 'X'),
- (0x30000, 'V'),
- (0x3134B, 'X'),
(0xE0100, 'I'),
+ ]
+
+def _seg_78():
+ return [
(0xE01F0, 'X'),
]
@@ -8507,6 +8202,4 @@ def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ _seg_76()
+ _seg_77()
+ _seg_78()
- + _seg_79()
- + _seg_80()
-) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...]
+)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/LICENSE b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/LICENSE
deleted file mode 100644
index be7e092b..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/LICENSE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2017-2019 Jason R. Coombs, Barry Warsaw
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/METADATA
deleted file mode 100644
index d4c34743..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/METADATA
+++ /dev/null
@@ -1,118 +0,0 @@
-Metadata-Version: 2.1
-Name: importlib-metadata
-Version: 4.11.3
-Summary: Read metadata from Python packages
-Home-page: https://github.com/python/importlib_metadata
-Author: Jason R. Coombs
-Author-email: jaraco@jaraco.com
-License: UNKNOWN
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3 :: Only
-Requires-Python: >=3.7
-License-File: LICENSE
-Requires-Dist: zipp (>=0.5)
-Requires-Dist: typing-extensions (>=3.6.4) ; python_version < "3.8"
-Provides-Extra: docs
-Requires-Dist: sphinx ; extra == 'docs'
-Requires-Dist: jaraco.packaging (>=9) ; extra == 'docs'
-Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
-Provides-Extra: perf
-Requires-Dist: ipython ; extra == 'perf'
-Provides-Extra: testing
-Requires-Dist: pytest (>=6) ; extra == 'testing'
-Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing'
-Requires-Dist: pytest-flake8 ; extra == 'testing'
-Requires-Dist: pytest-cov ; extra == 'testing'
-Requires-Dist: pytest-enabler (>=1.0.1) ; extra == 'testing'
-Requires-Dist: packaging ; extra == 'testing'
-Requires-Dist: pyfakefs ; extra == 'testing'
-Requires-Dist: flufl.flake8 ; extra == 'testing'
-Requires-Dist: pytest-perf (>=0.9.2) ; extra == 'testing'
-Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing'
-Requires-Dist: pytest-mypy (>=0.9.1) ; (platform_python_implementation != "PyPy") and extra == 'testing'
-Requires-Dist: importlib-resources (>=1.3) ; (python_version < "3.9") and extra == 'testing'
-
-.. image:: https://img.shields.io/pypi/v/importlib_metadata.svg
- :target: `PyPI link`_
-
-.. image:: https://img.shields.io/pypi/pyversions/importlib_metadata.svg
- :target: `PyPI link`_
-
-.. _PyPI link: https://pypi.org/project/importlib_metadata
-
-.. image:: https://github.com/python/importlib_metadata/workflows/tests/badge.svg
- :target: https://github.com/python/importlib_metadata/actions?query=workflow%3A%22tests%22
- :alt: tests
-
-.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
- :target: https://github.com/psf/black
- :alt: Code style: Black
-
-.. image:: https://readthedocs.org/projects/importlib-metadata/badge/?version=latest
- :target: https://importlib-metadata.readthedocs.io/en/latest/?badge=latest
-
-.. image:: https://img.shields.io/badge/skeleton-2022-informational
- :target: https://blog.jaraco.com/skeleton
-
-
-Library to access the metadata for a Python package.
-
-This package supplies third-party access to the functionality of
-`importlib.metadata <https://docs.python.org/3/library/importlib.metadata.html>`_
-including improvements added to subsequent Python versions.
-
-
-Compatibility
-=============
-
-New features are introduced in this third-party library and later merged
-into CPython. The following table indicates which versions of this library
-were contributed to different versions in the standard library:
-
-.. list-table::
- :header-rows: 1
-
- * - importlib_metadata
- - stdlib
- * - 4.8
- - 3.11
- * - 4.4
- - 3.10
- * - 1.4
- - 3.8
-
-
-Usage
-=====
-
-See the `online documentation <https://importlib_metadata.readthedocs.io/>`_
-for usage details.
-
-`Finder authors
-<https://docs.python.org/3/reference/import.html#finders-and-loaders>`_ can
-also add support for custom package installers. See the above documentation
-for details.
-
-
-Caveats
-=======
-
-This project primarily supports third-party packages installed by PyPA
-tools (or other conforming packages). It does not support:
-
-- Packages in the stdlib.
-- Packages installed without metadata.
-
-Project details
-===============
-
- * Project home: https://github.com/python/importlib_metadata
- * Report bugs at: https://github.com/python/importlib_metadata/issues
- * Code hosting: https://github.com/python/importlib_metadata
- * Documentation: https://importlib_metadata.readthedocs.io/
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/RECORD
deleted file mode 100644
index f32cdab7..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/RECORD
+++ /dev/null
@@ -1,15 +0,0 @@
-importlib_metadata-4.11.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-importlib_metadata-4.11.3.dist-info/LICENSE,sha256=wNe6dAchmJ1VvVB8D9oTc-gHHadCuaSBAev36sYEM6U,571
-importlib_metadata-4.11.3.dist-info/METADATA,sha256=QDN8bGG98uILiLVIoBDBL7qf1y40Vb_Pvp1fxPJtmS0,3997
-importlib_metadata-4.11.3.dist-info/RECORD,,
-importlib_metadata-4.11.3.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-importlib_metadata-4.11.3.dist-info/top_level.txt,sha256=CO3fD9yylANiXkrMo4qHLV_mqXL2sC5JFKgt1yWAT-A,19
-importlib_metadata/__init__.py,sha256=7VjJ9nthPlNrL_TriHRiT5Ta-ZBsBnXrZq65pRfGNIs,30889
-importlib_metadata/_adapters.py,sha256=B6fCi5-8mLVDFUZj3krI5nAo-mKp1dH_qIavyIyFrJs,1862
-importlib_metadata/_collections.py,sha256=CJ0OTCHIjWA0ZIVS4voORAsn2R4R2cQBEtPsZEJpASY,743
-importlib_metadata/_compat.py,sha256=EU2XCFBPFByuI0Of6XkAuBYbzqSyjwwwwqmsK4ccna0,1826
-importlib_metadata/_functools.py,sha256=PsY2-4rrKX4RVeRC1oGp1lB1pmC9eKN88_f-bD9uOoA,2895
-importlib_metadata/_itertools.py,sha256=cvr_2v8BRbxcIl5x5ldfqdHjhI8Yi8s8yk50G_nm6jQ,2068
-importlib_metadata/_meta.py,sha256=_F48Hu_jFxkfKWz5wcYS8vO23qEygbVdF9r-6qh-hjE,1154
-importlib_metadata/_text.py,sha256=HCsFksZpJLeTP3NEk_ngrAeXVRRtTrtyh9eOABoRP4A,2166
-importlib_metadata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/WHEEL
deleted file mode 100644
index becc9a66..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.37.1)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/top_level.txt
deleted file mode 100644
index bbb07547..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata-4.11.3.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-importlib_metadata
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/__init__.py
deleted file mode 100644
index 5ac8be23..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/__init__.py
+++ /dev/null
@@ -1,1075 +0,0 @@
-import os
-import re
-import abc
-import csv
-import sys
-import zipp
-import email
-import pathlib
-import operator
-import textwrap
-import warnings
-import functools
-import itertools
-import posixpath
-import collections
-
-from . import _adapters, _meta
-from ._collections import FreezableDefaultDict, Pair
-from ._compat import (
- NullFinder,
- install,
- pypy_partial,
-)
-from ._functools import method_cache, pass_none
-from ._itertools import always_iterable, unique_everseen
-from ._meta import PackageMetadata, SimplePath
-
-from contextlib import suppress
-from importlib import import_module
-from importlib.abc import MetaPathFinder
-from itertools import starmap
-from typing import List, Mapping, Optional, Union
-
-
-__all__ = [
- 'Distribution',
- 'DistributionFinder',
- 'PackageMetadata',
- 'PackageNotFoundError',
- 'distribution',
- 'distributions',
- 'entry_points',
- 'files',
- 'metadata',
- 'packages_distributions',
- 'requires',
- 'version',
-]
-
-
-class PackageNotFoundError(ModuleNotFoundError):
- """The package was not found."""
-
- def __str__(self):
- return f"No package metadata was found for {self.name}"
-
- @property
- def name(self):
- (name,) = self.args
- return name
-
-
-class Sectioned:
- """
- A simple entry point config parser for performance
-
- >>> for item in Sectioned.read(Sectioned._sample):
- ... print(item)
- Pair(name='sec1', value='# comments ignored')
- Pair(name='sec1', value='a = 1')
- Pair(name='sec1', value='b = 2')
- Pair(name='sec2', value='a = 2')
-
- >>> res = Sectioned.section_pairs(Sectioned._sample)
- >>> item = next(res)
- >>> item.name
- 'sec1'
- >>> item.value
- Pair(name='a', value='1')
- >>> item = next(res)
- >>> item.value
- Pair(name='b', value='2')
- >>> item = next(res)
- >>> item.name
- 'sec2'
- >>> item.value
- Pair(name='a', value='2')
- >>> list(res)
- []
- """
-
- _sample = textwrap.dedent(
- """
- [sec1]
- # comments ignored
- a = 1
- b = 2
-
- [sec2]
- a = 2
- """
- ).lstrip()
-
- @classmethod
- def section_pairs(cls, text):
- return (
- section._replace(value=Pair.parse(section.value))
- for section in cls.read(text, filter_=cls.valid)
- if section.name is not None
- )
-
- @staticmethod
- def read(text, filter_=None):
- lines = filter(filter_, map(str.strip, text.splitlines()))
- name = None
- for value in lines:
- section_match = value.startswith('[') and value.endswith(']')
- if section_match:
- name = value.strip('[]')
- continue
- yield Pair(name, value)
-
- @staticmethod
- def valid(line):
- return line and not line.startswith('#')
-
-
-class DeprecatedTuple:
- """
- Provide subscript item access for backward compatibility.
-
- >>> recwarn = getfixture('recwarn')
- >>> ep = EntryPoint(name='name', value='value', group='group')
- >>> ep[:]
- ('name', 'value', 'group')
- >>> ep[0]
- 'name'
- >>> len(recwarn)
- 1
- """
-
- _warn = functools.partial(
- warnings.warn,
- "EntryPoint tuple interface is deprecated. Access members by name.",
- DeprecationWarning,
- stacklevel=pypy_partial(2),
- )
-
- def __getitem__(self, item):
- self._warn()
- return self._key()[item]
-
-
-class EntryPoint(DeprecatedTuple):
- """An entry point as defined by Python packaging conventions.
-
- See `the packaging docs on entry points
- <https://packaging.python.org/en/latest/specifications/entry-points/>`_
- for more information.
-
- >>> ep = EntryPoint(
- ... name=None, group=None, value='package.module:attr [extra1, extra2]')
- >>> ep.module
- 'package.module'
- >>> ep.attr
- 'attr'
- >>> ep.extras
- ['extra1', 'extra2']
- """
-
- pattern = re.compile(
- r'(?P<module>[\w.]+)\s*'
- r'(:\s*(?P<attr>[\w.]+)\s*)?'
- r'((?P<extras>\[.*\])\s*)?$'
- )
- """
- A regular expression describing the syntax for an entry point,
- which might look like:
-
- - module
- - package.module
- - package.module:attribute
- - package.module:object.attribute
- - package.module:attr [extra1, extra2]
-
- Other combinations are possible as well.
-
- The expression is lenient about whitespace around the ':',
- following the attr, and following any extras.
- """
-
- dist: Optional['Distribution'] = None
-
- def __init__(self, name, value, group):
- vars(self).update(name=name, value=value, group=group)
-
- def load(self):
- """Load the entry point from its definition. If only a module
- is indicated by the value, return that module. Otherwise,
- return the named object.
- """
- match = self.pattern.match(self.value)
- module = import_module(match.group('module'))
- attrs = filter(None, (match.group('attr') or '').split('.'))
- return functools.reduce(getattr, attrs, module)
-
- @property
- def module(self):
- match = self.pattern.match(self.value)
- return match.group('module')
-
- @property
- def attr(self):
- match = self.pattern.match(self.value)
- return match.group('attr')
-
- @property
- def extras(self):
- match = self.pattern.match(self.value)
- return re.findall(r'\w+', match.group('extras') or '')
-
- def _for(self, dist):
- vars(self).update(dist=dist)
- return self
-
- def __iter__(self):
- """
- Supply iter so one may construct dicts of EntryPoints by name.
- """
- msg = (
- "Construction of dict of EntryPoints is deprecated in "
- "favor of EntryPoints."
- )
- warnings.warn(msg, DeprecationWarning)
- return iter((self.name, self))
-
- def matches(self, **params):
- """
- EntryPoint matches the given parameters.
-
- >>> ep = EntryPoint(group='foo', name='bar', value='bing:bong [extra1, extra2]')
- >>> ep.matches(group='foo')
- True
- >>> ep.matches(name='bar', value='bing:bong [extra1, extra2]')
- True
- >>> ep.matches(group='foo', name='other')
- False
- >>> ep.matches()
- True
- >>> ep.matches(extras=['extra1', 'extra2'])
- True
- >>> ep.matches(module='bing')
- True
- >>> ep.matches(attr='bong')
- True
- """
- attrs = (getattr(self, param) for param in params)
- return all(map(operator.eq, params.values(), attrs))
-
- def _key(self):
- return self.name, self.value, self.group
-
- def __lt__(self, other):
- return self._key() < other._key()
-
- def __eq__(self, other):
- return self._key() == other._key()
-
- def __setattr__(self, name, value):
- raise AttributeError("EntryPoint objects are immutable.")
-
- def __repr__(self):
- return (
- f'EntryPoint(name={self.name!r}, value={self.value!r}, '
- f'group={self.group!r})'
- )
-
- def __hash__(self):
- return hash(self._key())
-
-
-class DeprecatedList(list):
- """
- Allow an otherwise immutable object to implement mutability
- for compatibility.
-
- >>> recwarn = getfixture('recwarn')
- >>> dl = DeprecatedList(range(3))
- >>> dl[0] = 1
- >>> dl.append(3)
- >>> del dl[3]
- >>> dl.reverse()
- >>> dl.sort()
- >>> dl.extend([4])
- >>> dl.pop(-1)
- 4
- >>> dl.remove(1)
- >>> dl += [5]
- >>> dl + [6]
- [1, 2, 5, 6]
- >>> dl + (6,)
- [1, 2, 5, 6]
- >>> dl.insert(0, 0)
- >>> dl
- [0, 1, 2, 5]
- >>> dl == [0, 1, 2, 5]
- True
- >>> dl == (0, 1, 2, 5)
- True
- >>> len(recwarn)
- 1
- """
-
- __slots__ = ()
-
- _warn = functools.partial(
- warnings.warn,
- "EntryPoints list interface is deprecated. Cast to list if needed.",
- DeprecationWarning,
- stacklevel=pypy_partial(2),
- )
-
- def _wrap_deprecated_method(method_name: str): # type: ignore
- def wrapped(self, *args, **kwargs):
- self._warn()
- return getattr(super(), method_name)(*args, **kwargs)
-
- return method_name, wrapped
-
- locals().update(
- map(
- _wrap_deprecated_method,
- '__setitem__ __delitem__ append reverse extend pop remove '
- '__iadd__ insert sort'.split(),
- )
- )
-
- def __add__(self, other):
- if not isinstance(other, tuple):
- self._warn()
- other = tuple(other)
- return self.__class__(tuple(self) + other)
-
- def __eq__(self, other):
- if not isinstance(other, tuple):
- self._warn()
- other = tuple(other)
-
- return tuple(self).__eq__(other)
-
-
-class EntryPoints(DeprecatedList):
- """
- An immutable collection of selectable EntryPoint objects.
- """
-
- __slots__ = ()
-
- def __getitem__(self, name): # -> EntryPoint:
- """
- Get the EntryPoint in self matching name.
- """
- if isinstance(name, int):
- warnings.warn(
- "Accessing entry points by index is deprecated. "
- "Cast to tuple if needed.",
- DeprecationWarning,
- stacklevel=2,
- )
- return super().__getitem__(name)
- try:
- return next(iter(self.select(name=name)))
- except StopIteration:
- raise KeyError(name)
-
- def select(self, **params):
- """
- Select entry points from self that match the
- given parameters (typically group and/or name).
- """
- return EntryPoints(ep for ep in self if ep.matches(**params))
-
- @property
- def names(self):
- """
- Return the set of all names of all entry points.
- """
- return {ep.name for ep in self}
-
- @property
- def groups(self):
- """
- Return the set of all groups of all entry points.
-
- For coverage while SelectableGroups is present.
- >>> EntryPoints().groups
- set()
- """
- return {ep.group for ep in self}
-
- @classmethod
- def _from_text_for(cls, text, dist):
- return cls(ep._for(dist) for ep in cls._from_text(text))
-
- @staticmethod
- def _from_text(text):
- return (
- EntryPoint(name=item.value.name, value=item.value.value, group=item.name)
- for item in Sectioned.section_pairs(text or '')
- )
-
-
-class Deprecated:
- """
- Compatibility add-in for mapping to indicate that
- mapping behavior is deprecated.
-
- >>> recwarn = getfixture('recwarn')
- >>> class DeprecatedDict(Deprecated, dict): pass
- >>> dd = DeprecatedDict(foo='bar')
- >>> dd.get('baz', None)
- >>> dd['foo']
- 'bar'
- >>> list(dd)
- ['foo']
- >>> list(dd.keys())
- ['foo']
- >>> 'foo' in dd
- True
- >>> list(dd.values())
- ['bar']
- >>> len(recwarn)
- 1
- """
-
- _warn = functools.partial(
- warnings.warn,
- "SelectableGroups dict interface is deprecated. Use select.",
- DeprecationWarning,
- stacklevel=pypy_partial(2),
- )
-
- def __getitem__(self, name):
- self._warn()
- return super().__getitem__(name)
-
- def get(self, name, default=None):
- self._warn()
- return super().get(name, default)
-
- def __iter__(self):
- self._warn()
- return super().__iter__()
-
- def __contains__(self, *args):
- self._warn()
- return super().__contains__(*args)
-
- def keys(self):
- self._warn()
- return super().keys()
-
- def values(self):
- self._warn()
- return super().values()
-
-
-class SelectableGroups(Deprecated, dict):
- """
- A backward- and forward-compatible result from
- entry_points that fully implements the dict interface.
- """
-
- @classmethod
- def load(cls, eps):
- by_group = operator.attrgetter('group')
- ordered = sorted(eps, key=by_group)
- grouped = itertools.groupby(ordered, by_group)
- return cls((group, EntryPoints(eps)) for group, eps in grouped)
-
- @property
- def _all(self):
- """
- Reconstruct a list of all entrypoints from the groups.
- """
- groups = super(Deprecated, self).values()
- return EntryPoints(itertools.chain.from_iterable(groups))
-
- @property
- def groups(self):
- return self._all.groups
-
- @property
- def names(self):
- """
- for coverage:
- >>> SelectableGroups().names
- set()
- """
- return self._all.names
-
- def select(self, **params):
- if not params:
- return self
- return self._all.select(**params)
-
-
-class PackagePath(pathlib.PurePosixPath):
- """A reference to a path in a package"""
-
- def read_text(self, encoding='utf-8'):
- with self.locate().open(encoding=encoding) as stream:
- return stream.read()
-
- def read_binary(self):
- with self.locate().open('rb') as stream:
- return stream.read()
-
- def locate(self):
- """Return a path-like object for this path"""
- return self.dist.locate_file(self)
-
-
-class FileHash:
- def __init__(self, spec):
- self.mode, _, self.value = spec.partition('=')
-
- def __repr__(self):
- return f'<FileHash mode: {self.mode} value: {self.value}>'
-
-
-class Distribution:
- """A Python distribution package."""
-
- @abc.abstractmethod
- def read_text(self, filename):
- """Attempt to load metadata file given by the name.
-
- :param filename: The name of the file in the distribution info.
- :return: The text if found, otherwise None.
- """
-
- @abc.abstractmethod
- def locate_file(self, path):
- """
- Given a path to a file in this distribution, return a path
- to it.
- """
-
- @classmethod
- def from_name(cls, name):
- """Return the Distribution for the given package name.
-
- :param name: The name of the distribution package to search for.
- :return: The Distribution instance (or subclass thereof) for the named
- package, if found.
- :raises PackageNotFoundError: When the named package's distribution
- metadata cannot be found.
- """
- for resolver in cls._discover_resolvers():
- dists = resolver(DistributionFinder.Context(name=name))
- dist = next(iter(dists), None)
- if dist is not None:
- return dist
- else:
- raise PackageNotFoundError(name)
-
- @classmethod
- def discover(cls, **kwargs):
- """Return an iterable of Distribution objects for all packages.
-
- Pass a ``context`` or pass keyword arguments for constructing
- a context.
-
- :context: A ``DistributionFinder.Context`` object.
- :return: Iterable of Distribution objects for all packages.
- """
- context = kwargs.pop('context', None)
- if context and kwargs:
- raise ValueError("cannot accept context and kwargs")
- context = context or DistributionFinder.Context(**kwargs)
- return itertools.chain.from_iterable(
- resolver(context) for resolver in cls._discover_resolvers()
- )
-
- @staticmethod
- def at(path):
- """Return a Distribution for the indicated metadata path
-
- :param path: a string or path-like object
- :return: a concrete Distribution instance for the path
- """
- return PathDistribution(pathlib.Path(path))
-
- @staticmethod
- def _discover_resolvers():
- """Search the meta_path for resolvers."""
- declared = (
- getattr(finder, 'find_distributions', None) for finder in sys.meta_path
- )
- return filter(None, declared)
-
- @property
- def metadata(self) -> _meta.PackageMetadata:
- """Return the parsed metadata for this Distribution.
-
- The returned object will have keys that name the various bits of
- metadata. See PEP 566 for details.
- """
- text = (
- self.read_text('METADATA')
- or self.read_text('PKG-INFO')
- # This last clause is here to support old egg-info files. Its
- # effect is to just end up using the PathDistribution's self._path
- # (which points to the egg-info file) attribute unchanged.
- or self.read_text('')
- )
- return _adapters.Message(email.message_from_string(text))
-
- @property
- def name(self):
- """Return the 'Name' metadata for the distribution package."""
- return self.metadata['Name']
-
- @property
- def _normalized_name(self):
- """Return a normalized version of the name."""
- return Prepared.normalize(self.name)
-
- @property
- def version(self):
- """Return the 'Version' metadata for the distribution package."""
- return self.metadata['Version']
-
- @property
- def entry_points(self):
- return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
-
- @property
- def files(self):
- """Files in this distribution.
-
- :return: List of PackagePath for this distribution or None
-
- Result is `None` if the metadata file that enumerates files
- (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
- missing.
- Result may be empty if the metadata exists but is empty.
- """
-
- def make_file(name, hash=None, size_str=None):
- result = PackagePath(name)
- result.hash = FileHash(hash) if hash else None
- result.size = int(size_str) if size_str else None
- result.dist = self
- return result
-
- @pass_none
- def make_files(lines):
- return list(starmap(make_file, csv.reader(lines)))
-
- return make_files(self._read_files_distinfo() or self._read_files_egginfo())
-
- def _read_files_distinfo(self):
- """
- Read the lines of RECORD
- """
- text = self.read_text('RECORD')
- return text and text.splitlines()
-
- def _read_files_egginfo(self):
- """
- SOURCES.txt might contain literal commas, so wrap each line
- in quotes.
- """
- text = self.read_text('SOURCES.txt')
- return text and map('"{}"'.format, text.splitlines())
-
- @property
- def requires(self):
- """Generated requirements specified for this Distribution"""
- reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
- return reqs and list(reqs)
-
- def _read_dist_info_reqs(self):
- return self.metadata.get_all('Requires-Dist')
-
- def _read_egg_info_reqs(self):
- source = self.read_text('requires.txt')
- return pass_none(self._deps_from_requires_text)(source)
-
- @classmethod
- def _deps_from_requires_text(cls, source):
- return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
-
- @staticmethod
- def _convert_egg_info_reqs_to_simple_reqs(sections):
- """
- Historically, setuptools would solicit and store 'extra'
- requirements, including those with environment markers,
- in separate sections. More modern tools expect each
- dependency to be defined separately, with any relevant
- extras and environment markers attached directly to that
- requirement. This method converts the former to the
- latter. See _test_deps_from_requires_text for an example.
- """
-
- def make_condition(name):
- return name and f'extra == "{name}"'
-
- def quoted_marker(section):
- section = section or ''
- extra, sep, markers = section.partition(':')
- if extra and markers:
- markers = f'({markers})'
- conditions = list(filter(None, [markers, make_condition(extra)]))
- return '; ' + ' and '.join(conditions) if conditions else ''
-
- def url_req_space(req):
- """
- PEP 508 requires a space between the url_spec and the quoted_marker.
- Ref python/importlib_metadata#357.
- """
- # '@' is uniquely indicative of a url_req.
- return ' ' * ('@' in req)
-
- for section in sections:
- space = url_req_space(section.value)
- yield section.value + space + quoted_marker(section.name)
-
-
-class DistributionFinder(MetaPathFinder):
- """
- A MetaPathFinder capable of discovering installed distributions.
- """
-
- class Context:
- """
- Keyword arguments presented by the caller to
- ``distributions()`` or ``Distribution.discover()``
- to narrow the scope of a search for distributions
- in all DistributionFinders.
-
- Each DistributionFinder may expect any parameters
- and should attempt to honor the canonical
- parameters defined below when appropriate.
- """
-
- name = None
- """
- Specific name for which a distribution finder should match.
- A name of ``None`` matches all distributions.
- """
-
- def __init__(self, **kwargs):
- vars(self).update(kwargs)
-
- @property
- def path(self):
- """
- The sequence of directory path that a distribution finder
- should search.
-
- Typically refers to Python installed package paths such as
- "site-packages" directories and defaults to ``sys.path``.
- """
- return vars(self).get('path', sys.path)
-
- @abc.abstractmethod
- def find_distributions(self, context=Context()):
- """
- Find distributions.
-
- Return an iterable of all Distribution instances capable of
- loading the metadata for packages matching the ``context``,
- a DistributionFinder.Context instance.
- """
-
-
-class FastPath:
- """
- Micro-optimized class for searching a path for
- children.
-
- >>> FastPath('').children()
- ['...']
- """
-
- @functools.lru_cache() # type: ignore
- def __new__(cls, root):
- return super().__new__(cls)
-
- def __init__(self, root):
- self.root = root
-
- def joinpath(self, child):
- return pathlib.Path(self.root, child)
-
- def children(self):
- with suppress(Exception):
- return os.listdir(self.root or '.')
- with suppress(Exception):
- return self.zip_children()
- return []
-
- def zip_children(self):
- zip_path = zipp.Path(self.root)
- names = zip_path.root.namelist()
- self.joinpath = zip_path.joinpath
-
- return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names)
-
- def search(self, name):
- return self.lookup(self.mtime).search(name)
-
- @property
- def mtime(self):
- with suppress(OSError):
- return os.stat(self.root).st_mtime
- self.lookup.cache_clear()
-
- @method_cache
- def lookup(self, mtime):
- return Lookup(self)
-
-
-class Lookup:
- def __init__(self, path: FastPath):
- base = os.path.basename(path.root).lower()
- base_is_egg = base.endswith(".egg")
- self.infos = FreezableDefaultDict(list)
- self.eggs = FreezableDefaultDict(list)
-
- for child in path.children():
- low = child.lower()
- if low.endswith((".dist-info", ".egg-info")):
- # rpartition is faster than splitext and suitable for this purpose.
- name = low.rpartition(".")[0].partition("-")[0]
- normalized = Prepared.normalize(name)
- self.infos[normalized].append(path.joinpath(child))
- elif base_is_egg and low == "egg-info":
- name = base.rpartition(".")[0].partition("-")[0]
- legacy_normalized = Prepared.legacy_normalize(name)
- self.eggs[legacy_normalized].append(path.joinpath(child))
-
- self.infos.freeze()
- self.eggs.freeze()
-
- def search(self, prepared):
- infos = (
- self.infos[prepared.normalized]
- if prepared
- else itertools.chain.from_iterable(self.infos.values())
- )
- eggs = (
- self.eggs[prepared.legacy_normalized]
- if prepared
- else itertools.chain.from_iterable(self.eggs.values())
- )
- return itertools.chain(infos, eggs)
-
-
-class Prepared:
- """
- A prepared search for metadata on a possibly-named package.
- """
-
- normalized = None
- legacy_normalized = None
-
- def __init__(self, name):
- self.name = name
- if name is None:
- return
- self.normalized = self.normalize(name)
- self.legacy_normalized = self.legacy_normalize(name)
-
- @staticmethod
- def normalize(name):
- """
- PEP 503 normalization plus dashes as underscores.
- """
- return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_')
-
- @staticmethod
- def legacy_normalize(name):
- """
- Normalize the package name as found in the convention in
- older packaging tools versions and specs.
- """
- return name.lower().replace('-', '_')
-
- def __bool__(self):
- return bool(self.name)
-
-
-@install
-class MetadataPathFinder(NullFinder, DistributionFinder):
- """A degenerate finder for distribution packages on the file system.
-
- This finder supplies only a find_distributions() method for versions
- of Python that do not have a PathFinder find_distributions().
- """
-
- def find_distributions(self, context=DistributionFinder.Context()):
- """
- Find distributions.
-
- Return an iterable of all Distribution instances capable of
- loading the metadata for packages matching ``context.name``
- (or all names if ``None`` indicated) along the paths in the list
- of directories ``context.path``.
- """
- found = self._search_paths(context.name, context.path)
- return map(PathDistribution, found)
-
- @classmethod
- def _search_paths(cls, name, paths):
- """Find metadata directories in paths heuristically."""
- prepared = Prepared(name)
- return itertools.chain.from_iterable(
- path.search(prepared) for path in map(FastPath, paths)
- )
-
- def invalidate_caches(cls):
- FastPath.__new__.cache_clear()
-
-
-class PathDistribution(Distribution):
- def __init__(self, path: SimplePath):
- """Construct a distribution.
-
- :param path: SimplePath indicating the metadata directory.
- """
- self._path = path
-
- def read_text(self, filename):
- with suppress(
- FileNotFoundError,
- IsADirectoryError,
- KeyError,
- NotADirectoryError,
- PermissionError,
- ):
- return self._path.joinpath(filename).read_text(encoding='utf-8')
-
- read_text.__doc__ = Distribution.read_text.__doc__
-
- def locate_file(self, path):
- return self._path.parent / path
-
- @property
- def _normalized_name(self):
- """
- Performance optimization: where possible, resolve the
- normalized name from the file system path.
- """
- stem = os.path.basename(str(self._path))
- return self._name_from_stem(stem) or super()._normalized_name
-
- def _name_from_stem(self, stem):
- name, ext = os.path.splitext(stem)
- if ext not in ('.dist-info', '.egg-info'):
- return
- name, sep, rest = stem.partition('-')
- return name
-
-
-def distribution(distribution_name):
- """Get the ``Distribution`` instance for the named package.
-
- :param distribution_name: The name of the distribution package as a string.
- :return: A ``Distribution`` instance (or subclass thereof).
- """
- return Distribution.from_name(distribution_name)
-
-
-def distributions(**kwargs):
- """Get all ``Distribution`` instances in the current environment.
-
- :return: An iterable of ``Distribution`` instances.
- """
- return Distribution.discover(**kwargs)
-
-
-def metadata(distribution_name) -> _meta.PackageMetadata:
- """Get the metadata for the named package.
-
- :param distribution_name: The name of the distribution package to query.
- :return: A PackageMetadata containing the parsed metadata.
- """
- return Distribution.from_name(distribution_name).metadata
-
-
-def version(distribution_name):
- """Get the version string for the named package.
-
- :param distribution_name: The name of the distribution package to query.
- :return: The version string for the package as defined in the package's
- "Version" metadata key.
- """
- return distribution(distribution_name).version
-
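
A minimal usage sketch of the helpers above (``wheel`` is just an assumed installed distribution; any installed name works):

    from importlib_metadata import distribution, metadata, version

    dist = distribution("wheel")            # Distribution instance
    print(version("wheel"))                 # e.g. '0.37.1'
    meta = metadata("wheel")                # PackageMetadata mapping
    print(meta["Name"], meta["Version"])    # header-style access
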
-
-def entry_points(**params) -> Union[EntryPoints, SelectableGroups]:
- """Return EntryPoint objects for all installed packages.
-
- Pass selection parameters (group or name) to filter the
- result to entry points matching those properties (see
- EntryPoints.select()).
-
- For compatibility, returns ``SelectableGroups`` object unless
- selection parameters are supplied. In the future, this function
- will return ``EntryPoints`` instead of ``SelectableGroups``
- even when no selection parameters are supplied.
-
- For maximum future compatibility, pass selection parameters
- or invoke ``.select`` with parameters on the result.
-
- :return: EntryPoints or SelectableGroups for all installed packages.
- """
- norm_name = operator.attrgetter('_normalized_name')
- unique = functools.partial(unique_everseen, key=norm_name)
- eps = itertools.chain.from_iterable(
- dist.entry_points for dist in unique(distributions())
- )
- return SelectableGroups.load(eps).select(**params)
-
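
A hedged sketch of the forward-compatible style recommended above (``console_scripts`` is a standard group; the contents depend on the environment):

    from importlib_metadata import entry_points

    # With selection parameters, EntryPoints is returned directly.
    for ep in entry_points(group="console_scripts"):
        print(ep.name, "->", ep.value)

    # Equivalent: call .select() on the unfiltered result.
    scripts = entry_points().select(group="console_scripts")
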
-
-def files(distribution_name):
- """Return a list of files for the named package.
-
- :param distribution_name: The name of the distribution package to query.
- :return: List of files composing the distribution.
- """
- return distribution(distribution_name).files
-
-
-def requires(distribution_name):
- """
- Return a list of requirements for the named package.
-
- :return: An iterator of requirements, suitable for
- packaging.requirement.Requirement.
- """
- return distribution(distribution_name).requires
-
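
A short sketch for ``files`` and ``requires`` (again assuming ``wheel`` is installed; both return ``None`` when the underlying metadata is absent):

    from importlib_metadata import files, requires

    for f in files("wheel") or []:          # PackagePath entries
        print(f, f.size, f.hash)

    print(requires("wheel"))                # e.g. ["pytest (>=3.0.0) ; extra == 'test'"]
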
-
-def packages_distributions() -> Mapping[str, List[str]]:
- """
- Return a mapping of top-level packages to their
- distributions.
-
- >>> import collections.abc
- >>> pkgs = packages_distributions()
- >>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values())
- True
- """
- pkg_to_dist = collections.defaultdict(list)
- for dist in distributions():
- for pkg in _top_level_declared(dist) or _top_level_inferred(dist):
- pkg_to_dist[pkg].append(dist.metadata['Name'])
- return dict(pkg_to_dist)
-
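
The doctest above only checks the shape of the result; typical content looks like the following (keys depend on what is installed):

    mapping = packages_distributions()
    # e.g. {'yaml': ['PyYAML'], 'importlib_metadata': ['importlib-metadata'], ...}
    print(mapping.get("yaml"))              # ['PyYAML'] if PyYAML is installed
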
-
-def _top_level_declared(dist):
- return (dist.read_text('top_level.txt') or '').split()
-
-
-def _top_level_inferred(dist):
- return {
- f.parts[0] if len(f.parts) > 1 else f.with_suffix('').name
- for f in always_iterable(dist.files)
- if f.suffix == ".py"
- }
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_adapters.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_adapters.py
deleted file mode 100644
index aa460d3e..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_adapters.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import re
-import textwrap
-import email.message
-
-from ._text import FoldedCase
-
-
-class Message(email.message.Message):
- multiple_use_keys = set(
- map(
- FoldedCase,
- [
- 'Classifier',
- 'Obsoletes-Dist',
- 'Platform',
- 'Project-URL',
- 'Provides-Dist',
- 'Provides-Extra',
- 'Requires-Dist',
- 'Requires-External',
- 'Supported-Platform',
- 'Dynamic',
- ],
- )
- )
- """
- Keys that may be indicated multiple times per PEP 566.
- """
-
- def __new__(cls, orig: email.message.Message):
- res = super().__new__(cls)
- vars(res).update(vars(orig))
- return res
-
- def __init__(self, *args, **kwargs):
- self._headers = self._repair_headers()
-
- # suppress spurious error from mypy
- def __iter__(self):
- return super().__iter__()
-
- def _repair_headers(self):
- def redent(value):
- "Correct for RFC822 indentation"
- if not value or '\n' not in value:
- return value
- return textwrap.dedent(' ' * 8 + value)
-
- headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
- if self._payload:
- headers.append(('Description', self.get_payload()))
- return headers
-
- @property
- def json(self):
- """
- Convert PackageMetadata to a JSON-compatible format
- per PEP 0566.
- """
-
- def transform(key):
- value = self.get_all(key) if key in self.multiple_use_keys else self[key]
- if key == 'Keywords':
- value = re.split(r'\s+', value)
- tk = key.lower().replace('-', '_')
- return tk, value
-
- return dict(map(transform, map(FoldedCase, self)))
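
A small sketch of feeding this adapter (the raw metadata text is made up; ``Classifier`` is one of the multiple-use keys listed above):

    import email

    raw = "Name: demo\nVersion: 1.0\nClassifier: A\nClassifier: B\n"
    msg = Message(email.message_from_string(raw))
    print(msg.json)
    # {'name': 'demo', 'version': '1.0', 'classifier': ['A', 'B']}
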
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_collections.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_collections.py
deleted file mode 100644
index cf0954e1..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_collections.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import collections
-
-
-# from jaraco.collections 3.3
-class FreezableDefaultDict(collections.defaultdict):
- """
- Often it is desirable to prevent the mutation of
- a default dict after its initial construction, such
- as to prevent mutation during iteration.
-
- >>> dd = FreezableDefaultDict(list)
- >>> dd[0].append('1')
- >>> dd.freeze()
- >>> dd[1]
- []
- >>> len(dd)
- 1
- """
-
- def __missing__(self, key):
- return getattr(self, '_frozen', super().__missing__)(key)
-
- def freeze(self):
- self._frozen = lambda key: self.default_factory()
-
-
-class Pair(collections.namedtuple('Pair', 'name value')):
- @classmethod
- def parse(cls, text):
- return cls(*map(str.strip, text.split("=", 1)))
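
``Pair.parse`` splits on the first ``=`` and strips both sides, which matches the ``name = value`` lines found in entry-point metadata:

    pair = Pair.parse("name = some.module:attr")
    assert pair == Pair(name="name", value="some.module:attr")
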
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_compat.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_compat.py
deleted file mode 100644
index 8fe4e4e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_compat.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import sys
-import platform
-
-
-__all__ = ['install', 'NullFinder', 'Protocol']
-
-
-try:
- from typing import Protocol
-except ImportError: # pragma: no cover
- from typing_extensions import Protocol # type: ignore
-
-
-def install(cls):
- """
- Class decorator for installation on sys.meta_path.
-
- Adds the backport DistributionFinder to sys.meta_path and
- attempts to disable the finder functionality of the stdlib
- DistributionFinder.
- """
- sys.meta_path.append(cls())
- disable_stdlib_finder()
- return cls
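
A hedged sketch of what the decorator does at class-definition time (``MyFinder`` is hypothetical; the real decorated class is ``MetadataPathFinder``):

    import sys

    @install
    class MyFinder(NullFinder):
        def find_distributions(self, context=None):
            return iter(())

    # install() appended an instance to sys.meta_path:
    assert isinstance(sys.meta_path[-1], MyFinder)
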
-
-
-def disable_stdlib_finder():
- """
- Give the backport primacy for discovering path-based distributions
- by monkey-patching the stdlib O_O.
-
- See #91 for more background on the rationale for this sketchy
- behavior.
- """
-
- def matches(finder):
- return getattr(
- finder, '__module__', None
- ) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions')
-
- for finder in filter(matches, sys.meta_path): # pragma: nocover
- del finder.find_distributions
-
-
-class NullFinder:
- """
- A "Finder" (aka "MetaClassFinder") that never finds any modules,
- but may find distributions.
- """
-
- @staticmethod
- def find_spec(*args, **kwargs):
- return None
-
- # In Python 2, the import system requires finders
- # to have a find_module() method, but this usage
- # is deprecated in Python 3 in favor of find_spec().
- # For the purposes of this finder (i.e. being present
- # on sys.meta_path but having no other import
- # system functionality), the two methods are identical.
- find_module = find_spec
-
-
-def pypy_partial(val):
- """
- Adjust for variable stacklevel on partial under PyPy.
-
- Workaround for #327.
- """
- is_pypy = platform.python_implementation() == 'PyPy'
- return val + is_pypy
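
Because ``is_pypy`` is a bool, the addition bumps the value by one only on PyPy:

    print(pypy_partial(2))    # 2 on CPython, 3 on PyPy
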
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_functools.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_functools.py
deleted file mode 100644
index 71f66bd0..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_functools.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import types
-import functools
-
-
-# from jaraco.functools 3.3
-def method_cache(method, cache_wrapper=None):
- """
- Wrap lru_cache to support storing the cache data in the object instances.
-
- Abstracts the common paradigm where the method explicitly saves an
- underscore-prefixed protected property on first call and returns that
- subsequently.
-
- >>> class MyClass:
- ... calls = 0
- ...
- ... @method_cache
- ... def method(self, value):
- ... self.calls += 1
- ... return value
-
- >>> a = MyClass()
- >>> a.method(3)
- 3
- >>> for x in range(75):
- ... res = a.method(x)
- >>> a.calls
- 75
-
- Note that the apparent behavior will be exactly like that of lru_cache
- except that the cache is stored on each instance, so values in one
- instance will not flush values from another, and when an instance is
- deleted, so are the cached values for that instance.
-
- >>> b = MyClass()
- >>> for x in range(35):
- ... res = b.method(x)
- >>> b.calls
- 35
- >>> a.method(0)
- 0
- >>> a.calls
- 75
-
- Note that if method had been decorated with ``functools.lru_cache()``,
- a.calls would have been 76 (due to the cached value of 0 having been
- flushed by the 'b' instance).
-
- Clear the cache with ``.cache_clear()``
-
- >>> a.method.cache_clear()
-
- Same for a method that hasn't yet been called.
-
- >>> c = MyClass()
- >>> c.method.cache_clear()
-
- Another cache wrapper may be supplied:
-
- >>> cache = functools.lru_cache(maxsize=2)
- >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
- >>> a = MyClass()
- >>> a.method2()
- 3
-
- Caution - do not subsequently wrap the method with another decorator, such
- as ``@property``, which changes the semantics of the function.
-
- See also
- http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
- for another implementation and additional justification.
- """
- cache_wrapper = cache_wrapper or functools.lru_cache()
-
- def wrapper(self, *args, **kwargs):
- # it's the first call, replace the method with a cached, bound method
- bound_method = types.MethodType(method, self)
- cached_method = cache_wrapper(bound_method)
- setattr(self, method.__name__, cached_method)
- return cached_method(*args, **kwargs)
-
- # Support cache clear even before cache has been created.
- wrapper.cache_clear = lambda: None
-
- return wrapper
-
-
-# From jaraco.functools 3.3
-def pass_none(func):
- """
- Wrap func so it's not called if its first param is None
-
- >>> print_text = pass_none(print)
- >>> print_text('text')
- text
- >>> print_text(None)
- """
-
- @functools.wraps(func)
- def wrapper(param, *args, **kwargs):
- if param is not None:
- return func(param, *args, **kwargs)
-
- return wrapper
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_itertools.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_itertools.py
deleted file mode 100644
index d4ca9b91..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_itertools.py
+++ /dev/null
@@ -1,73 +0,0 @@
-from itertools import filterfalse
-
-
-def unique_everseen(iterable, key=None):
- "List unique elements, preserving order. Remember all elements ever seen."
- # unique_everseen('AAAABBBCCDAABBB') --> A B C D
- # unique_everseen('ABBCcAD', str.lower) --> A B C D
- seen = set()
- seen_add = seen.add
- if key is None:
- for element in filterfalse(seen.__contains__, iterable):
- seen_add(element)
- yield element
- else:
- for element in iterable:
- k = key(element)
- if k not in seen:
- seen_add(k)
- yield element
-
-
-# copied from more_itertools 8.8
-def always_iterable(obj, base_type=(str, bytes)):
- """If *obj* is iterable, return an iterator over its items::
-
- >>> obj = (1, 2, 3)
- >>> list(always_iterable(obj))
- [1, 2, 3]
-
- If *obj* is not iterable, return a one-item iterable containing *obj*::
-
- >>> obj = 1
- >>> list(always_iterable(obj))
- [1]
-
- If *obj* is ``None``, return an empty iterable:
-
- >>> obj = None
- >>> list(always_iterable(None))
- []
-
- By default, binary and text strings are not considered iterable::
-
- >>> obj = 'foo'
- >>> list(always_iterable(obj))
- ['foo']
-
- If *base_type* is set, objects for which ``isinstance(obj, base_type)``
- returns ``True`` won't be considered iterable.
-
- >>> obj = {'a': 1}
- >>> list(always_iterable(obj)) # Iterate over the dict's keys
- ['a']
- >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
- [{'a': 1}]
-
- Set *base_type* to ``None`` to avoid any special handling and treat objects
- Python considers iterable as iterable:
-
- >>> obj = 'foo'
- >>> list(always_iterable(obj, base_type=None))
- ['f', 'o', 'o']
- """
- if obj is None:
- return iter(())
-
- if (base_type is not None) and isinstance(obj, base_type):
- return iter((obj,))
-
- try:
- return iter(obj)
- except TypeError:
- return iter((obj,))
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_meta.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_meta.py
deleted file mode 100644
index 37ee43e6..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_meta.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from ._compat import Protocol
-from typing import Any, Dict, Iterator, List, TypeVar, Union
-
-
-_T = TypeVar("_T")
-
-
-class PackageMetadata(Protocol):
- def __len__(self) -> int:
- ... # pragma: no cover
-
- def __contains__(self, item: str) -> bool:
- ... # pragma: no cover
-
- def __getitem__(self, key: str) -> str:
- ... # pragma: no cover
-
- def __iter__(self) -> Iterator[str]:
- ... # pragma: no cover
-
- def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
- """
- Return all values associated with a possibly multi-valued key.
- """
-
- @property
- def json(self) -> Dict[str, Union[str, List[str]]]:
- """
- A JSON-compatible form of the metadata.
- """
-
-
-class SimplePath(Protocol):
- """
- A minimal subset of pathlib.Path required by PathDistribution.
- """
-
- def joinpath(self) -> 'SimplePath':
- ... # pragma: no cover
-
- def __truediv__(self) -> 'SimplePath':
- ... # pragma: no cover
-
- def parent(self) -> 'SimplePath':
- ... # pragma: no cover
-
- def read_text(self) -> str:
- ... # pragma: no cover
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_text.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_text.py
deleted file mode 100644
index c88cfbb2..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/_text.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import re
-
-from ._functools import method_cache
-
-
-# from jaraco.text 3.5
-class FoldedCase(str):
- """
- A case insensitive string class; behaves just like str
- except compares equal when the only variation is case.
-
- >>> s = FoldedCase('hello world')
-
- >>> s == 'Hello World'
- True
-
- >>> 'Hello World' == s
- True
-
- >>> s != 'Hello World'
- False
-
- >>> s.index('O')
- 4
-
- >>> s.split('O')
- ['hell', ' w', 'rld']
-
- >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
- ['alpha', 'Beta', 'GAMMA']
-
- Sequence membership is straightforward.
-
- >>> "Hello World" in [s]
- True
- >>> s in ["Hello World"]
- True
-
- You may test for set inclusion, but candidate and elements
- must both be folded.
-
- >>> FoldedCase("Hello World") in {s}
- True
- >>> s in {FoldedCase("Hello World")}
- True
-
- String inclusion works as long as the FoldedCase object
- is on the right.
-
- >>> "hello" in FoldedCase("Hello World")
- True
-
- But not if the FoldedCase object is on the left:
-
- >>> FoldedCase('hello') in 'Hello World'
- False
-
- In that case, use in_:
-
- >>> FoldedCase('hello').in_('Hello World')
- True
-
- >>> FoldedCase('hello') > FoldedCase('Hello')
- False
- """
-
- def __lt__(self, other):
- return self.lower() < other.lower()
-
- def __gt__(self, other):
- return self.lower() > other.lower()
-
- def __eq__(self, other):
- return self.lower() == other.lower()
-
- def __ne__(self, other):
- return self.lower() != other.lower()
-
- def __hash__(self):
- return hash(self.lower())
-
- def __contains__(self, other):
- return super().lower().__contains__(other.lower())
-
- def in_(self, other):
- "Does self appear in other?"
- return self in FoldedCase(other)
-
- # cache lower since it's likely to be called frequently.
- @method_cache
- def lower(self):
- return super().lower()
-
- def index(self, sub):
- return self.lower().index(sub.lower())
-
- def split(self, splitter=' ', maxsplit=0):
- pattern = re.compile(re.escape(splitter), re.I)
- return pattern.split(self, maxsplit)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/py.typed b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/importlib_metadata/py.typed
deleted file mode 100644
index e69de29b..00000000
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/__init__.py
old mode 100644
new mode 100755
index f17866f6..15e13b6f
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/__init__.py
@@ -1,44 +1,83 @@
# -*- coding: utf-8 -*-
-"""Jinja is a template engine written in pure Python. It provides a
-non-XML syntax that supports inline expressions and an optional
-sandboxed environment.
"""
-from markupsafe import escape
-from markupsafe import Markup
-
-from .bccache import BytecodeCache
-from .bccache import FileSystemBytecodeCache
-from .bccache import MemcachedBytecodeCache
-from .environment import Environment
-from .environment import Template
-from .exceptions import TemplateAssertionError
-from .exceptions import TemplateError
-from .exceptions import TemplateNotFound
-from .exceptions import TemplateRuntimeError
-from .exceptions import TemplatesNotFound
-from .exceptions import TemplateSyntaxError
-from .exceptions import UndefinedError
-from .filters import contextfilter
-from .filters import environmentfilter
-from .filters import evalcontextfilter
-from .loaders import BaseLoader
-from .loaders import ChoiceLoader
-from .loaders import DictLoader
-from .loaders import FileSystemLoader
-from .loaders import FunctionLoader
-from .loaders import ModuleLoader
-from .loaders import PackageLoader
-from .loaders import PrefixLoader
-from .runtime import ChainableUndefined
-from .runtime import DebugUndefined
-from .runtime import make_logging_undefined
-from .runtime import StrictUndefined
-from .runtime import Undefined
-from .utils import clear_caches
-from .utils import contextfunction
-from .utils import environmentfunction
-from .utils import evalcontextfunction
-from .utils import is_undefined
-from .utils import select_autoescape
-
-__version__ = "2.11.3"
+ jinja2
+ ~~~~~~
+
+ Jinja2 is a template engine written in pure Python. It provides a
+ Django-inspired non-XML syntax but supports inline expressions and
+ an optional sandboxed environment.
+
+ Nutshell
+ --------
+
+ Here is a small example of a Jinja2 template::
+
+ {% extends 'base.html' %}
+ {% block title %}Memberlist{% endblock %}
+ {% block content %}
+   <ul>
+   {% for user in users %}
+     <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+   {% endfor %}
+   </ul>
+ {% endblock %}
+
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+__docformat__ = 'restructuredtext en'
+__version__ = '2.10.1'
+
+# high level interface
+from jinja2.environment import Environment, Template
+
+# loaders
+from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
+ DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
+ ModuleLoader
+
+# bytecode caches
+from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
+ MemcachedBytecodeCache
+
+# undefined types
+from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
+ make_logging_undefined
+
+# exceptions
+from jinja2.exceptions import TemplateError, UndefinedError, \
+ TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
+ TemplateAssertionError, TemplateRuntimeError
+
+# decorators and public utilities
+from jinja2.filters import environmentfilter, contextfilter, \
+ evalcontextfilter
+from jinja2.utils import Markup, escape, clear_caches, \
+ environmentfunction, evalcontextfunction, contextfunction, \
+ is_undefined, select_autoescape
+
+__all__ = [
+ 'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
+ 'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
+ 'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
+ 'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
+ 'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
+ 'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
+ 'TemplateRuntimeError',
+ 'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
+ 'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
+ 'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
+ 'select_autoescape',
+]
+
+
+def _patch_async():
+ from jinja2.utils import have_async_gen
+ if have_async_gen:
+ from jinja2.asyncsupport import patch_all
+ patch_all()
+
+
+_patch_async()
+del _patch_async
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/_compat.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/_compat.py
old mode 100644
new mode 100755
index 1f044954..61d85301
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/_compat.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/_compat.py
@@ -1,12 +1,22 @@
# -*- coding: utf-8 -*-
-# flake8: noqa
-import marshal
+"""
+ jinja2._compat
+ ~~~~~~~~~~~~~~
+
+ Some py2/py3 compatibility support based on a stripped down
+ version of six so we don't have to depend on a specific version
+ of it.
+
+ :copyright: Copyright 2013 by the Jinja team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
import sys
PY2 = sys.version_info[0] == 2
-PYPY = hasattr(sys, "pypy_translation_info")
+PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x
+
if not PY2:
unichr = chr
range_type = range
@@ -20,7 +30,6 @@
import pickle
from io import BytesIO, StringIO
-
NativeStringIO = StringIO
def reraise(tp, value, tb=None):
@@ -37,9 +46,6 @@ def reraise(tp, value, tb=None):
implements_to_string = _identity
encode_filename = _identity
- marshal_dump = marshal.dump
- marshal_load = marshal.load
-
else:
unichr = unichr
text_type = unicode
@@ -53,13 +59,11 @@ def reraise(tp, value, tb=None):
import cPickle as pickle
from cStringIO import StringIO as BytesIO, StringIO
-
NativeStringIO = BytesIO
- exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
+ exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
from itertools import imap, izip, ifilter
-
intern = intern
def implements_iterator(cls):
@@ -69,25 +73,14 @@ def implements_iterator(cls):
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
- cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
+ cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def encode_filename(filename):
if isinstance(filename, unicode):
- return filename.encode("utf-8")
+ return filename.encode('utf-8')
return filename
- def marshal_dump(code, f):
- if isinstance(f, file):
- marshal.dump(code, f)
- else:
- f.write(marshal.dumps(code))
-
- def marshal_load(f):
- if isinstance(f, file):
- return marshal.load(f)
- return marshal.loads(f.read())
-
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
@@ -97,36 +90,10 @@ def with_metaclass(meta, *bases):
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
-
- return type.__new__(metaclass, "temporary_class", (), {})
+ return type.__new__(metaclass, 'temporary_class', (), {})
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
-
-
-try:
- from collections import abc
-except ImportError:
- import collections as abc
-
-
-try:
- from os import fspath
-except ImportError:
- try:
- from pathlib import PurePath
- except ImportError:
- PurePath = None
-
- def fspath(path):
- if hasattr(path, "__fspath__"):
- return path.__fspath__()
-
- # Python 3.5 doesn't have __fspath__ yet, use str.
- if PurePath is not None and isinstance(path, PurePath):
- return str(path)
-
- return path
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/_identifier.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/_identifier.py
old mode 100644
new mode 100755
index 224d5449..2eac35d5
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/_identifier.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/_identifier.py
@@ -1,6 +1,2 @@
-import re
-
# generated by scripts/generate_identifier_pattern.py
-pattern = re.compile(
- r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
-)
+pattern = '·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯'
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/asyncfilters.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/asyncfilters.py
old mode 100644
new mode 100755
index 3d98dbcc..5c1f46d7
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/asyncfilters.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/asyncfilters.py
@@ -1,13 +1,12 @@
from functools import wraps
-from . import filters
-from .asyncsupport import auto_aiter
-from .asyncsupport import auto_await
+from jinja2.asyncsupport import auto_aiter
+from jinja2 import filters
async def auto_to_seq(value):
seq = []
- if hasattr(value, "__aiter__"):
+ if hasattr(value, '__aiter__'):
async for item in value:
seq.append(item)
else:
@@ -17,7 +16,8 @@ async def auto_to_seq(value):
async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
- seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
+ seq, func = filters.prepare_select_or_reject(
+ args, kwargs, modfunc, lookup_attr)
if seq:
async for item in auto_aiter(seq):
if func(item):
@@ -26,19 +26,14 @@ async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
def dualfilter(normal_filter, async_filter):
wrap_evalctx = False
- if getattr(normal_filter, "environmentfilter", False) is True:
-
- def is_async(args):
- return args[0].is_async
-
+ if getattr(normal_filter, 'environmentfilter', False):
+ is_async = lambda args: args[0].is_async
wrap_evalctx = False
else:
- has_evalctxfilter = getattr(normal_filter, "evalcontextfilter", False) is True
- has_ctxfilter = getattr(normal_filter, "contextfilter", False) is True
- wrap_evalctx = not has_evalctxfilter and not has_ctxfilter
-
- def is_async(args):
- return args[0].environment.is_async
+ if not getattr(normal_filter, 'evalcontextfilter', False) and \
+ not getattr(normal_filter, 'contextfilter', False):
+ wrap_evalctx = True
+ is_async = lambda args: args[0].environment.is_async
@wraps(normal_filter)
def wrapper(*args, **kwargs):
@@ -60,7 +55,6 @@ def wrapper(*args, **kwargs):
def asyncfiltervariant(original):
def decorator(f):
return dualfilter(original, f)
-
return decorator
@@ -69,22 +63,19 @@ async def do_first(environment, seq):
try:
return await auto_aiter(seq).__anext__()
except StopAsyncIteration:
- return environment.undefined("No first item, sequence was empty.")
+ return environment.undefined('No first item, sequence was empty.')
@asyncfiltervariant(filters.do_groupby)
async def do_groupby(environment, value, attribute):
expr = filters.make_attrgetter(environment, attribute)
- return [
- filters._GroupTuple(key, await auto_to_seq(values))
- for key, values in filters.groupby(
- sorted(await auto_to_seq(value), key=expr), expr
- )
- ]
+ return [filters._GroupTuple(key, await auto_to_seq(values))
+ for key, values in filters.groupby(sorted(
+ await auto_to_seq(value), key=expr), expr)]
@asyncfiltervariant(filters.do_join)
-async def do_join(eval_ctx, value, d=u"", attribute=None):
+async def do_join(eval_ctx, value, d=u'', attribute=None):
return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
@@ -118,7 +109,7 @@ async def do_map(*args, **kwargs):
seq, func = filters.prepare_map(args, kwargs)
if seq:
async for item in auto_aiter(seq):
- yield await auto_await(func(item))
+ yield func(item)
@asyncfiltervariant(filters.do_sum)
@@ -127,10 +118,7 @@ async def do_sum(environment, iterable, attribute=None, start=0):
if attribute is not None:
func = filters.make_attrgetter(environment, attribute)
else:
-
- def func(x):
- return x
-
+ func = lambda x: x
async for item in auto_aiter(iterable):
rv += func(item)
return rv
@@ -142,17 +130,17 @@ async def do_slice(value, slices, fill_with=None):
ASYNC_FILTERS = {
- "first": do_first,
- "groupby": do_groupby,
- "join": do_join,
- "list": do_list,
+ 'first': do_first,
+ 'groupby': do_groupby,
+ 'join': do_join,
+ 'list': do_list,
# we intentionally do not support do_last because that would be
# ridiculous
- "reject": do_reject,
- "rejectattr": do_rejectattr,
- "map": do_map,
- "select": do_select,
- "selectattr": do_selectattr,
- "sum": do_sum,
- "slice": do_slice,
+ 'reject': do_reject,
+ 'rejectattr': do_rejectattr,
+ 'map': do_map,
+ 'select': do_select,
+ 'selectattr': do_selectattr,
+ 'sum': do_sum,
+ 'slice': do_slice,
}
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/asyncsupport.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/asyncsupport.py
old mode 100644
new mode 100755
index 78ba3739..b1e7b5ce
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/asyncsupport.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/asyncsupport.py
@@ -1,27 +1,29 @@
# -*- coding: utf-8 -*-
-"""The code for async support. Importing this patches Jinja on supported
-Python versions.
"""
+ jinja2.asyncsupport
+ ~~~~~~~~~~~~~~~~~~~
+
+ Has all the code for async support which is implemented as a patch
+ for supported Python versions.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import sys
import asyncio
import inspect
from functools import update_wrapper
-from markupsafe import Markup
-
-from .environment import TemplateModule
-from .runtime import LoopContext
-from .utils import concat
-from .utils import internalcode
-from .utils import missing
+from jinja2.utils import concat, internalcode, Markup
+from jinja2.environment import TemplateModule
+from jinja2.runtime import LoopContextBase, _last_iteration
async def concat_async(async_gen):
rv = []
-
async def collect():
async for event in async_gen:
rv.append(event)
-
await collect()
return concat(rv)
@@ -32,7 +34,10 @@ async def generate_async(self, *args, **kwargs):
async for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
- yield self.environment.handle_exception()
+ exc_info = sys.exc_info()
+ else:
+ return
+ yield self.environment.handle_exception(exc_info, True)
def wrap_generate_func(original_generate):
@@ -43,18 +48,17 @@ def _convert_generator(self, loop, args, kwargs):
yield loop.run_until_complete(async_gen.__anext__())
except StopAsyncIteration:
pass
-
def generate(self, *args, **kwargs):
if not self.environment.is_async:
return original_generate(self, *args, **kwargs)
return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
-
return update_wrapper(generate, original_generate)
async def render_async(self, *args, **kwargs):
if not self.environment.is_async:
- raise RuntimeError("The environment was not created with async mode enabled.")
+ raise RuntimeError('The environment was not created with async mode '
+ 'enabled.')
vars = dict(*args, **kwargs)
ctx = self.new_context(vars)
@@ -62,7 +66,8 @@ async def render_async(self, *args, **kwargs):
try:
return await concat_async(self.root_render_func(ctx))
except Exception:
- return self.environment.handle_exception()
+ exc_info = sys.exc_info()
+ return self.environment.handle_exception(exc_info, True)
def wrap_render_func(original_render):
@@ -71,7 +76,6 @@ def render(self, *args, **kwargs):
return original_render(self, *args, **kwargs)
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.render_async(*args, **kwargs))
-
return update_wrapper(render, original_render)
@@ -105,7 +109,6 @@ def _invoke(self, arguments, autoescape):
if not self._environment.is_async:
return original_invoke(self, arguments, autoescape)
return async_invoke(self, arguments, autoescape)
-
return update_wrapper(_invoke, original_invoke)
@@ -121,9 +124,9 @@ def wrap_default_module(original_default_module):
@internalcode
def _get_default_module(self):
if self.environment.is_async:
- raise RuntimeError("Template module attribute is unavailable in async mode")
+ raise RuntimeError('Template module attribute is unavailable '
+ 'in async mode')
return original_default_module(self)
-
return _get_default_module
@@ -136,30 +139,30 @@ async def make_module_async(self, vars=None, shared=False, locals=None):
def patch_template():
- from . import Template
-
+ from jinja2 import Template
Template.generate = wrap_generate_func(Template.generate)
- Template.generate_async = update_wrapper(generate_async, Template.generate_async)
- Template.render_async = update_wrapper(render_async, Template.render_async)
+ Template.generate_async = update_wrapper(
+ generate_async, Template.generate_async)
+ Template.render_async = update_wrapper(
+ render_async, Template.render_async)
Template.render = wrap_render_func(Template.render)
- Template._get_default_module = wrap_default_module(Template._get_default_module)
+ Template._get_default_module = wrap_default_module(
+ Template._get_default_module)
Template._get_default_module_async = get_default_module_async
Template.make_module_async = update_wrapper(
- make_module_async, Template.make_module_async
- )
+ make_module_async, Template.make_module_async)
def patch_runtime():
- from .runtime import BlockReference, Macro
-
- BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__)
+ from jinja2.runtime import BlockReference, Macro
+ BlockReference.__call__ = wrap_block_reference_call(
+ BlockReference.__call__)
Macro._invoke = wrap_macro_invoke(Macro._invoke)
def patch_filters():
- from .filters import FILTERS
- from .asyncfilters import ASYNC_FILTERS
-
+ from jinja2.filters import FILTERS
+ from jinja2.asyncfilters import ASYNC_FILTERS
FILTERS.update(ASYNC_FILTERS)
@@ -176,7 +179,7 @@ async def auto_await(value):
async def auto_aiter(iterable):
- if hasattr(iterable, "__aiter__"):
+ if hasattr(iterable, '__aiter__'):
async for item in iterable:
yield item
return
@@ -184,81 +187,70 @@ async def auto_aiter(iterable):
yield item
-class AsyncLoopContext(LoopContext):
- _to_iterator = staticmethod(auto_aiter)
-
- @property
- async def length(self):
- if self._length is not None:
- return self._length
-
- try:
- self._length = len(self._iterable)
- except TypeError:
- iterable = [x async for x in self._iterator]
- self._iterator = self._to_iterator(iterable)
- self._length = len(iterable) + self.index + (self._after is not missing)
-
- return self._length
+class AsyncLoopContext(LoopContextBase):
- @property
- async def revindex0(self):
- return await self.length - self.index
+ def __init__(self, async_iterator, undefined, after, length, recurse=None,
+ depth0=0):
+ LoopContextBase.__init__(self, undefined, recurse, depth0)
+ self._async_iterator = async_iterator
+ self._after = after
+ self._length = length
@property
- async def revindex(self):
- return await self.length - self.index0
-
- async def _peek_next(self):
- if self._after is not missing:
- return self._after
-
- try:
- self._after = await self._iterator.__anext__()
- except StopAsyncIteration:
- self._after = missing
-
- return self._after
+ def length(self):
+ if self._length is None:
+ raise TypeError('Loop length for some iterators cannot be '
+ 'lazily calculated in async mode')
+ return self._length
- @property
- async def last(self):
- return await self._peek_next() is missing
+ def __aiter__(self):
+ return AsyncLoopContextIterator(self)
- @property
- async def nextitem(self):
- rv = await self._peek_next()
- if rv is missing:
- return self._undefined("there is no next item")
+class AsyncLoopContextIterator(object):
+ __slots__ = ('context',)
- return rv
+ def __init__(self, context):
+ self.context = context
def __aiter__(self):
return self
async def __anext__(self):
- if self._after is not missing:
- rv = self._after
- self._after = missing
- else:
- rv = await self._iterator.__anext__()
-
- self.index0 += 1
- self._before = self._current
- self._current = rv
- return rv, self
+ ctx = self.context
+ ctx.index0 += 1
+ if ctx._after is _last_iteration:
+ raise StopAsyncIteration()
+ ctx._before = ctx._current
+ ctx._current = ctx._after
+ try:
+ ctx._after = await ctx._async_iterator.__anext__()
+ except StopAsyncIteration:
+ ctx._after = _last_iteration
+ return ctx._current, ctx
async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
- import warnings
-
- warnings.warn(
- "This template must be recompiled with at least Jinja 2.11, or"
- " it will fail in 3.0.",
- DeprecationWarning,
- stacklevel=2,
- )
- return AsyncLoopContext(iterable, undefined, recurse, depth0)
-
-
-patch_all()
+ # Length is more complicated and less efficient in async mode. The
+ # reason for this is that we cannot know if length will be used
+ # upfront but because length is a property we cannot lazily execute it
+ # later. This means that we need to buffer it up and measure :(
+ #
+ # We however only do this for actual iterators, not for async
+ # iterators as blocking here does not seem like the best idea in the
+ # world.
+ try:
+ length = len(iterable)
+ except (TypeError, AttributeError):
+ if not hasattr(iterable, '__aiter__'):
+ iterable = tuple(iterable)
+ length = len(iterable)
+ else:
+ length = None
+ async_iterator = auto_aiter(iterable)
+ try:
+ after = await async_iterator.__anext__()
+ except StopAsyncIteration:
+ after = _last_iteration
+ return AsyncLoopContext(async_iterator, undefined, after, length, recurse,
+ depth0)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/bccache.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/bccache.py
old mode 100644
new mode 100755
index 9c066103..080e527c
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/bccache.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/bccache.py
@@ -1,37 +1,60 @@
# -*- coding: utf-8 -*-
-"""The optional bytecode cache system. This is useful if you have very
-complex template situations and the compilation of all those templates
-slows down your application too much.
+"""
+ jinja2.bccache
+ ~~~~~~~~~~~~~~
+
+ This module implements the bytecode cache system Jinja is optionally
+ using. This is useful if you have very complex template situations and
+ the compilation of all those templates slows down your application too
+ much.
+
+ Situations where this is useful are often forking web applications that
+ are initialized on the first request.
-Situations where this is useful are often forking web applications that
-are initialized on the first request.
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
"""
-import errno
-import fnmatch
+from os import path, listdir
import os
-import stat
import sys
+import stat
+import errno
+import marshal
import tempfile
+import fnmatch
from hashlib import sha1
-from os import listdir
-from os import path
-
-from ._compat import BytesIO
-from ._compat import marshal_dump
-from ._compat import marshal_load
-from ._compat import pickle
-from ._compat import text_type
-from .utils import open_if_exists
-
-bc_version = 4
-# Magic bytes to identify Jinja bytecode cache files. Contains the
-# Python major and minor version to avoid loading incompatible bytecode
-# if a project upgrades its Python version.
-bc_magic = (
- b"j2"
- + pickle.dumps(bc_version, 2)
- + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2)
-)
+from jinja2.utils import open_if_exists
+from jinja2._compat import BytesIO, pickle, PY2, text_type
+
+
+# marshal works better on 3.x, one hack less required
+if not PY2:
+ marshal_dump = marshal.dump
+ marshal_load = marshal.load
+else:
+
+ def marshal_dump(code, f):
+ if isinstance(f, file):
+ marshal.dump(code, f)
+ else:
+ f.write(marshal.dumps(code))
+
+ def marshal_load(f):
+ if isinstance(f, file):
+ return marshal.load(f)
+ return marshal.loads(f.read())
+
+
+bc_version = 3
+
+# magic version used to only change with new jinja versions. With 2.6
+# we change this to also take Python version changes into account. The
+# reason for this is that Python tends to segfault if fed earlier bytecode
+# versions because someone thought it would be a good idea to reuse opcodes
+# or make Python incompatible with earlier versions.
+bc_magic = 'j2'.encode('ascii') + \
+ pickle.dumps(bc_version, 2) + \
+ pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
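
The classes below plug into an ``Environment``; a hedged wiring sketch using the public Jinja2 API (the cache directory is made up):

    from jinja2 import Environment, FileSystemLoader, FileSystemBytecodeCache

    env = Environment(
        loader=FileSystemLoader("templates"),
        bytecode_cache=FileSystemBytecodeCache("/tmp/jinja_cache"),
    )
    # Compiled templates are marshalled into /tmp/jinja_cache and reused
    # across processes, so forking web apps skip recompilation.
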
class Bucket(object):
@@ -75,7 +98,7 @@ def load_bytecode(self, f):
def write_bytecode(self, f):
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
- raise TypeError("can't write empty bucket")
+ raise TypeError('can\'t write empty bucket')
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
marshal_dump(self.code, f)
@@ -117,7 +140,7 @@ def dump_bytecode(self, bucket):
bucket.write_bytecode(f)
A more advanced version of a filesystem based bytecode cache is part of
- Jinja.
+ Jinja2.
"""
def load_bytecode(self, bucket):
@@ -135,24 +158,24 @@ def dump_bytecode(self, bucket):
raise NotImplementedError()
def clear(self):
- """Clears the cache. This method is not used by Jinja but should be
+ """Clears the cache. This method is not used by Jinja2 but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name."""
- hash = sha1(name.encode("utf-8"))
+ hash = sha1(name.encode('utf-8'))
if filename is not None:
- filename = "|" + filename
+ filename = '|' + filename
if isinstance(filename, text_type):
- filename = filename.encode("utf-8")
+ filename = filename.encode('utf-8')
hash.update(filename)
return hash.hexdigest()
def get_source_checksum(self, source):
"""Returns a checksum for the source."""
- return sha1(source.encode("utf-8")).hexdigest()
+ return sha1(source.encode('utf-8')).hexdigest()
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
@@ -187,7 +210,7 @@ class FileSystemBytecodeCache(BytecodeCache):
This bytecode cache supports clearing of the cache using the clear method.
"""
- def __init__(self, directory=None, pattern="__jinja2_%s.cache"):
+ def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
@@ -195,21 +218,19 @@ def __init__(self, directory=None, pattern="__jinja2_%s.cache"):
def _get_default_cache_dir(self):
def _unsafe_dir():
- raise RuntimeError(
- "Cannot determine safe temp directory. You "
- "need to explicitly provide one."
- )
+ raise RuntimeError('Cannot determine safe temp directory. You '
+ 'need to explicitly provide one.')
tmpdir = tempfile.gettempdir()
# On Windows the temporary directory is user-specific unless
# explicitly forced otherwise. We can just use that.
- if os.name == "nt":
+ if os.name == 'nt':
return tmpdir
- if not hasattr(os, "getuid"):
+ if not hasattr(os, 'getuid'):
_unsafe_dir()
- dirname = "_jinja2-cache-%d" % os.getuid()
+ dirname = '_jinja2-cache-%d' % os.getuid()
actual_dir = os.path.join(tmpdir, dirname)
try:
@@ -220,22 +241,18 @@ def _unsafe_dir():
try:
os.chmod(actual_dir, stat.S_IRWXU)
actual_dir_stat = os.lstat(actual_dir)
- if (
- actual_dir_stat.st_uid != os.getuid()
- or not stat.S_ISDIR(actual_dir_stat.st_mode)
- or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
- ):
+ if actual_dir_stat.st_uid != os.getuid() \
+ or not stat.S_ISDIR(actual_dir_stat.st_mode) \
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
_unsafe_dir()
except OSError as e:
if e.errno != errno.EEXIST:
raise
actual_dir_stat = os.lstat(actual_dir)
- if (
- actual_dir_stat.st_uid != os.getuid()
- or not stat.S_ISDIR(actual_dir_stat.st_mode)
- or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
- ):
+ if actual_dir_stat.st_uid != os.getuid() \
+ or not stat.S_ISDIR(actual_dir_stat.st_mode) \
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
_unsafe_dir()
return actual_dir
@@ -244,7 +261,7 @@ def _get_cache_filename(self, bucket):
return path.join(self.directory, self.pattern % bucket.key)
def load_bytecode(self, bucket):
- f = open_if_exists(self._get_cache_filename(bucket), "rb")
+ f = open_if_exists(self._get_cache_filename(bucket), 'rb')
if f is not None:
try:
bucket.load_bytecode(f)
@@ -252,7 +269,7 @@ def load_bytecode(self, bucket):
f.close()
def dump_bytecode(self, bucket):
- f = open(self._get_cache_filename(bucket), "wb")
+ f = open(self._get_cache_filename(bucket), 'wb')
try:
bucket.write_bytecode(f)
finally:
@@ -263,8 +280,7 @@ def clear(self):
# write access on the file system and the function does not exist
# normally.
from os import remove
-
- files = fnmatch.filter(listdir(self.directory), self.pattern % "*")
+ files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
for filename in files:
try:
remove(path.join(self.directory, filename))
@@ -280,8 +296,9 @@ class MemcachedBytecodeCache(BytecodeCache):
Libraries compatible with this class:
- - `cachelib <https://github.com/pallets/cachelib>`_
- - `python-memcached <https://pypi.org/project/python-memcached/>`_
+ - `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
+ - `python-memcached <https://www.tummy.com/Community/software/python-memcached/>`_
+ - `cmemcache <http://gijsbert.org/cmemcache/>`_
(Unfortunately the django cache interface is not compatible because it
does not support storing binary data, only unicode. You can however pass
@@ -317,13 +334,8 @@ class MemcachedBytecodeCache(BytecodeCache):
`ignore_memcache_errors` parameter.
"""
- def __init__(
- self,
- client,
- prefix="jinja2/bytecode/",
- timeout=None,
- ignore_memcache_errors=True,
- ):
+ def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
+ ignore_memcache_errors=True):
self.client = client
self.prefix = prefix
self.timeout = timeout
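
A hedged construction sketch for the class above (assumes the python-memcached client and a memcached server on the default port):

    import memcache
    from jinja2 import Environment, MemcachedBytecodeCache

    client = memcache.Client(["127.0.0.1:11211"])
    env = Environment(
        bytecode_cache=MemcachedBytecodeCache(client, prefix="jinja2/bytecode/")
    )
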
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/compiler.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/compiler.py
old mode 100644
new mode 100755
index 63297b42..d534a827
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/compiler.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/compiler.py
@@ -1,62 +1,59 @@
# -*- coding: utf-8 -*-
-"""Compiles nodes from the parser into Python code."""
-from collections import namedtuple
-from functools import update_wrapper
+"""
+ jinja2.compiler
+ ~~~~~~~~~~~~~~~
+
+ Compiles nodes into python code.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
from itertools import chain
+from copy import deepcopy
from keyword import iskeyword as is_python_keyword
+from functools import update_wrapper
+from jinja2 import nodes
+from jinja2.nodes import EvalContext
+from jinja2.visitor import NodeVisitor
+from jinja2.optimizer import Optimizer
+from jinja2.exceptions import TemplateAssertionError
+from jinja2.utils import Markup, concat, escape
+from jinja2._compat import range_type, text_type, string_types, \
+ iteritems, NativeStringIO, imap, izip
+from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \
+ VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED
-from markupsafe import escape
-from markupsafe import Markup
-
-from . import nodes
-from ._compat import imap
-from ._compat import iteritems
-from ._compat import izip
-from ._compat import NativeStringIO
-from ._compat import range_type
-from ._compat import string_types
-from ._compat import text_type
-from .exceptions import TemplateAssertionError
-from .idtracking import Symbols
-from .idtracking import VAR_LOAD_ALIAS
-from .idtracking import VAR_LOAD_PARAMETER
-from .idtracking import VAR_LOAD_RESOLVE
-from .idtracking import VAR_LOAD_UNDEFINED
-from .nodes import EvalContext
-from .optimizer import Optimizer
-from .utils import concat
-from .visitor import NodeVisitor
operators = {
- "eq": "==",
- "ne": "!=",
- "gt": ">",
- "gteq": ">=",
- "lt": "<",
- "lteq": "<=",
- "in": "in",
- "notin": "not in",
+ 'eq': '==',
+ 'ne': '!=',
+ 'gt': '>',
+ 'gteq': '>=',
+ 'lt': '<',
+ 'lteq': '<=',
+ 'in': 'in',
+ 'notin': 'not in'
}
# what method to iterate over items do we want to use for dict iteration
# in generated code? on 2.x let's go with iteritems, on 3.x with items
-if hasattr(dict, "iteritems"):
- dict_item_iter = "iteritems"
+if hasattr(dict, 'iteritems'):
+ dict_item_iter = 'iteritems'
else:
- dict_item_iter = "items"
+ dict_item_iter = 'items'
-code_features = ["division"]
+code_features = ['division']
# does this python version support generator stops? (PEP 0479)
try:
- exec("from __future__ import generator_stop")
- code_features.append("generator_stop")
+ exec('from __future__ import generator_stop')
+ code_features.append('generator_stop')
except SyntaxError:
pass
# does this python version support yield from?
try:
- exec("def f(): yield from x()")
+ exec('def f(): yield from x()')
except SyntaxError:
supports_yield_from = False
else:
@@ -71,19 +68,17 @@ def new_func(self, node, frame, **kwargs):
if new_node != node:
return self.visit(new_node, frame)
return f(self, node, frame, **kwargs)
-
return update_wrapper(new_func, f)
-def generate(
- node, environment, name, filename, stream=None, defer_init=False, optimized=True
-):
+def generate(node, environment, name, filename, stream=None,
+ defer_init=False, optimized=True):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
- raise TypeError("Can't compile non template nodes")
- generator = environment.code_generator_class(
- environment, name, filename, stream, defer_init, optimized
- )
+ raise TypeError('Can\'t compile non template nodes')
+ generator = environment.code_generator_class(environment, name, filename,
+ stream, defer_init,
+ optimized)
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
@@ -124,6 +119,7 @@ def find_undeclared(nodes, names):
class MacroRef(object):
+
def __init__(self, node):
self.node = node
self.accesses_caller = False
@@ -136,7 +132,8 @@ class Frame(object):
def __init__(self, eval_ctx, parent=None, level=None):
self.eval_ctx = eval_ctx
- self.symbols = Symbols(parent and parent.symbols or None, level=level)
+ self.symbols = Symbols(parent and parent.symbols or None,
+ level=level)
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
@@ -226,7 +223,7 @@ def __init__(self, names):
self.undeclared = set()
def visit_Name(self, node):
- if node.ctx == "load" and node.name in self.names:
+ if node.ctx == 'load' and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
@@ -245,9 +242,9 @@ class CompilerExit(Exception):
class CodeGenerator(NodeVisitor):
- def __init__(
- self, environment, name, filename, stream=None, defer_init=False, optimized=True
- ):
+
+ def __init__(self, environment, name, filename, stream=None,
+ defer_init=False, optimized=True):
if stream is None:
stream = NativeStringIO()
self.environment = environment
@@ -309,7 +306,7 @@ def __init__(
self._param_def_block = []
# Tracks the current context.
- self._context_reference_stack = ["context"]
+ self._context_reference_stack = ['context']
# -- Various compilation helpers
@@ -320,30 +317,30 @@ def fail(self, msg, lineno):
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
- return "t_%d" % self._last_identifier
+ return 't_%d' % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
- self.writeline("%s = []" % frame.buffer)
+ self.writeline('%s = []' % frame.buffer)
def return_buffer_contents(self, frame, force_unescaped=False):
"""Return the buffer contents of the frame."""
if not force_unescaped:
if frame.eval_ctx.volatile:
- self.writeline("if context.eval_ctx.autoescape:")
+ self.writeline('if context.eval_ctx.autoescape:')
self.indent()
- self.writeline("return Markup(concat(%s))" % frame.buffer)
+ self.writeline('return Markup(concat(%s))' % frame.buffer)
self.outdent()
- self.writeline("else:")
+ self.writeline('else:')
self.indent()
- self.writeline("return concat(%s)" % frame.buffer)
+ self.writeline('return concat(%s)' % frame.buffer)
self.outdent()
return
elif frame.eval_ctx.autoescape:
- self.writeline("return Markup(concat(%s))" % frame.buffer)
+ self.writeline('return Markup(concat(%s))' % frame.buffer)
return
- self.writeline("return concat(%s)" % frame.buffer)
+ self.writeline('return concat(%s)' % frame.buffer)
def indent(self):
"""Indent by one."""
@@ -356,14 +353,14 @@ def outdent(self, step=1):
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
- self.writeline("yield ", node)
+ self.writeline('yield ', node)
else:
- self.writeline("%s.append(" % frame.buffer, node)
+ self.writeline('%s.append(' % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
- self.write(")")
+ self.write(')')
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
@@ -376,7 +373,7 @@ def blockvisit(self, nodes, frame):
is no buffer a dummy ``if 0: yield None`` is written automatically.
"""
try:
- self.writeline("pass")
+ self.writeline('pass')
for node in nodes:
self.visit(node, frame)
except CompilerExit:
@@ -386,13 +383,14 @@ def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
- self.stream.write("\n" * self._new_lines)
+ self.stream.write('\n' * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
- self.debug_info.append((self._write_debug_info, self.code_lineno))
+ self.debug_info.append((self._write_debug_info,
+ self.code_lineno))
self._write_debug_info = None
self._first_write = False
- self.stream.write(" " * self._indentation)
+ self.stream.write(' ' * self._indentation)
self._new_lines = 0
self.stream.write(x)
@@ -412,7 +410,7 @@ def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include python keywords otherwise a syntax
error could occur. The extra keyword arguments should be given
as python dict.
"""
# if any of the given keyword arguments is a python keyword
@@ -424,41 +422,41 @@ def signature(self, node, frame, extra_kwargs=None):
break
for arg in node.args:
- self.write(", ")
+ self.write(', ')
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
- self.write(", ")
+ self.write(', ')
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
- self.write(", %s=%s" % (key, value))
+ self.write(', %s=%s' % (key, value))
if node.dyn_args:
- self.write(", *")
+ self.write(', *')
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
- self.write(", **dict({")
+ self.write(', **dict({')
else:
- self.write(", **{")
+ self.write(', **{')
for kwarg in node.kwargs:
- self.write("%r: " % kwarg.key)
+ self.write('%r: ' % kwarg.key)
self.visit(kwarg.value, frame)
- self.write(", ")
+ self.write(', ')
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
- self.write("%r: %s, " % (key, value))
+ self.write('%r: %s, ' % (key, value))
if node.dyn_kwargs is not None:
- self.write("}, **")
+ self.write('}, **')
self.visit(node.dyn_kwargs, frame)
- self.write(")")
+ self.write(')')
else:
- self.write("}")
+ self.write('}')
elif node.dyn_kwargs is not None:
- self.write(", **")
+ self.write(', **')
self.visit(node.dyn_kwargs, frame)
def pull_dependencies(self, nodes):
@@ -466,14 +464,13 @@ def pull_dependencies(self, nodes):
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
- for dependency in "filters", "tests":
+ for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
- self.writeline(
- "%s = environment.%s[%r]" % (mapping[name], dependency, name)
- )
+ self.writeline('%s = environment.%s[%r]' %
+ (mapping[name], dependency, name))
def enter_frame(self, frame):
undefs = []
@@ -481,15 +478,16 @@ def enter_frame(self, frame):
if action == VAR_LOAD_PARAMETER:
pass
elif action == VAR_LOAD_RESOLVE:
- self.writeline("%s = %s(%r)" % (target, self.get_resolve_func(), param))
+ self.writeline('%s = %s(%r)' %
+ (target, self.get_resolve_func(), param))
elif action == VAR_LOAD_ALIAS:
- self.writeline("%s = %s" % (target, param))
+ self.writeline('%s = %s' % (target, param))
elif action == VAR_LOAD_UNDEFINED:
undefs.append(target)
else:
- raise NotImplementedError("unknown load instruction")
+ raise NotImplementedError('unknown load instruction')
if undefs:
- self.writeline("%s = missing" % " = ".join(undefs))
+ self.writeline('%s = missing' % ' = '.join(undefs))
def leave_frame(self, frame, with_python_scope=False):
if not with_python_scope:
@@ -497,12 +495,12 @@ def leave_frame(self, frame, with_python_scope=False):
for target, _ in iteritems(frame.symbols.loads):
undefs.append(target)
if undefs:
- self.writeline("%s = missing" % " = ".join(undefs))
+ self.writeline('%s = missing' % ' = '.join(undefs))
def func(self, name):
if self.environment.is_async:
- return "async def %s" % name
- return "def %s" % name
+ return 'async def %s' % name
+ return 'def %s' % name
def macro_body(self, node, frame):
"""Dump the function def of a macro or call block."""
@@ -514,16 +512,16 @@ def macro_body(self, node, frame):
skip_special_params = set()
args = []
for idx, arg in enumerate(node.args):
- if arg.name == "caller":
+ if arg.name == 'caller':
explicit_caller = idx
- if arg.name in ("kwargs", "varargs"):
+ if arg.name in ('kwargs', 'varargs'):
skip_special_params.add(arg.name)
args.append(frame.symbols.ref(arg.name))
- undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs"))
+ undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs'))
- if "caller" in undeclared:
- # In older Jinja versions there was a bug that allowed caller
+ if 'caller' in undeclared:
+ # In older Jinja2 versions there was a bug that allowed caller
# to retain the special behavior even if it was mentioned in
# the argument list. However thankfully this was only really
# working if it was the last argument. So we are explicitly
@@ -533,26 +531,23 @@ def macro_body(self, node, frame):
try:
node.defaults[explicit_caller - len(node.args)]
except IndexError:
- self.fail(
- "When defining macros or call blocks the "
- 'special "caller" argument must be omitted '
- "or be given a default.",
- node.lineno,
- )
+ self.fail('When defining macros or call blocks the '
+ 'special "caller" argument must be omitted '
+ 'or be given a default.', node.lineno)
else:
- args.append(frame.symbols.declare_parameter("caller"))
+ args.append(frame.symbols.declare_parameter('caller'))
macro_ref.accesses_caller = True
- if "kwargs" in undeclared and "kwargs" not in skip_special_params:
- args.append(frame.symbols.declare_parameter("kwargs"))
+        if 'kwargs' in undeclared and 'kwargs' not in skip_special_params:
+ args.append(frame.symbols.declare_parameter('kwargs'))
macro_ref.accesses_kwargs = True
- if "varargs" in undeclared and "varargs" not in skip_special_params:
- args.append(frame.symbols.declare_parameter("varargs"))
+        if 'varargs' in undeclared and 'varargs' not in skip_special_params:
+ args.append(frame.symbols.declare_parameter('varargs'))
macro_ref.accesses_varargs = True
# macros are delayed, they never require output checks
frame.require_output_check = False
frame.symbols.analyze_node(node)
- self.writeline("%s(%s):" % (self.func("macro"), ", ".join(args)), node)
+ self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node)
self.indent()
self.buffer(frame)
@@ -561,17 +556,17 @@ def macro_body(self, node, frame):
self.push_parameter_definitions(frame)
for idx, arg in enumerate(node.args):
ref = frame.symbols.ref(arg.name)
- self.writeline("if %s is missing:" % ref)
+ self.writeline('if %s is missing:' % ref)
self.indent()
try:
default = node.defaults[idx - len(node.args)]
except IndexError:
- self.writeline(
- "%s = undefined(%r, name=%r)"
- % (ref, "parameter %r was not provided" % arg.name, arg.name)
- )
+ self.writeline('%s = undefined(%r, name=%r)' % (
+ ref,
+ 'parameter %r was not provided' % arg.name,
+ arg.name))
else:
- self.writeline("%s = " % ref)
+ self.writeline('%s = ' % ref)
self.visit(default, frame)
self.mark_parameter_stored(ref)
self.outdent()
@@ -586,46 +581,35 @@ def macro_body(self, node, frame):
def macro_def(self, macro_ref, frame):
"""Dump the macro definition for the def created by macro_body."""
- arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args)
- name = getattr(macro_ref.node, "name", None)
+ arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args)
+ name = getattr(macro_ref.node, 'name', None)
if len(macro_ref.node.args) == 1:
- arg_tuple += ","
- self.write(
- "Macro(environment, macro, %r, (%s), %r, %r, %r, "
- "context.eval_ctx.autoescape)"
- % (
- name,
- arg_tuple,
- macro_ref.accesses_kwargs,
- macro_ref.accesses_varargs,
- macro_ref.accesses_caller,
- )
- )
+ arg_tuple += ','
+ self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, '
+ 'context.eval_ctx.autoescape)' %
+ (name, arg_tuple, macro_ref.accesses_kwargs,
+ macro_ref.accesses_varargs, macro_ref.accesses_caller))
def position(self, node):
"""Return a human readable position for the node."""
- rv = "line %d" % node.lineno
+ rv = 'line %d' % node.lineno
if self.name is not None:
- rv += " in " + repr(self.name)
+ rv += ' in ' + repr(self.name)
return rv
def dump_local_context(self, frame):
- return "{%s}" % ", ".join(
- "%r: %s" % (name, target)
- for name, target in iteritems(frame.symbols.dump_stores())
- )
+ return '{%s}' % ', '.join(
+ '%r: %s' % (name, target) for name, target
+ in iteritems(frame.symbols.dump_stores()))
def write_commons(self):
"""Writes a common preamble that is used by root and block functions.
Primarily this sets up common local helpers and enforces a generator
through a dead branch.
"""
- self.writeline("resolve = context.resolve_or_missing")
- self.writeline("undefined = environment.undefined")
- # always use the standard Undefined class for the implicit else of
- # conditional expressions
- self.writeline("cond_expr_undefined = Undefined")
- self.writeline("if 0: yield None")
+ self.writeline('resolve = context.resolve_or_missing')
+ self.writeline('undefined = environment.undefined')
+ self.writeline('if 0: yield None')
def push_parameter_definitions(self, frame):
"""Pushes all parameter targets from the given frame into a local
@@ -658,12 +642,12 @@ def get_context_ref(self):
def get_resolve_func(self):
target = self._context_reference_stack[-1]
- if target == "context":
- return "resolve"
- return "%s.resolve" % target
+ if target == 'context':
+ return 'resolve'
+ return '%s.resolve' % target
def derive_context(self, frame):
- return "%s.derived(%s)" % (
+ return '%s.derived(%s)' % (
self.get_context_ref(),
self.dump_local_context(frame),
)
@@ -685,48 +669,44 @@ def pop_assign_tracking(self, frame):
vars = self._assign_stack.pop()
if not frame.toplevel or not vars:
return
- public_names = [x for x in vars if x[:1] != "_"]
+ public_names = [x for x in vars if x[:1] != '_']
if len(vars) == 1:
name = next(iter(vars))
ref = frame.symbols.ref(name)
- self.writeline("context.vars[%r] = %s" % (name, ref))
+ self.writeline('context.vars[%r] = %s' % (name, ref))
else:
- self.writeline("context.vars.update({")
+ self.writeline('context.vars.update({')
for idx, name in enumerate(vars):
if idx:
- self.write(", ")
+ self.write(', ')
ref = frame.symbols.ref(name)
- self.write("%r: %s" % (name, ref))
- self.write("})")
+ self.write('%r: %s' % (name, ref))
+ self.write('})')
if public_names:
if len(public_names) == 1:
- self.writeline("context.exported_vars.add(%r)" % public_names[0])
+ self.writeline('context.exported_vars.add(%r)' %
+ public_names[0])
else:
- self.writeline(
- "context.exported_vars.update((%s))"
- % ", ".join(imap(repr, public_names))
- )
+ self.writeline('context.exported_vars.update((%s))' %
+ ', '.join(imap(repr, public_names)))
# -- Statement Visitors
def visit_Template(self, node, frame=None):
- assert frame is None, "no root frame allowed"
+ assert frame is None, 'no root frame allowed'
eval_ctx = EvalContext(self.environment, self.name)
- from .runtime import exported
-
- self.writeline("from __future__ import %s" % ", ".join(code_features))
- self.writeline("from jinja2.runtime import " + ", ".join(exported))
+ from jinja2.runtime import __all__ as exported
+ self.writeline('from __future__ import %s' % ', '.join(code_features))
+ self.writeline('from jinja2.runtime import ' + ', '.join(exported))
if self.environment.is_async:
- self.writeline(
- "from jinja2.asyncsupport import auto_await, "
- "auto_aiter, AsyncLoopContext"
- )
+ self.writeline('from jinja2.asyncsupport import auto_await, '
+ 'auto_aiter, make_async_loop_context')
# if we want a deferred initialization we cannot move the
# environment into a local name
- envenv = not self.defer_init and ", environment=environment" or ""
+ envenv = not self.defer_init and ', environment=environment' or ''
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
@@ -735,7 +715,7 @@ def visit_Template(self, node, frame=None):
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
- self.fail("block %r defined twice" % block.name, block.lineno)
+ self.fail('block %r defined twice' % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
@@ -743,32 +723,32 @@ def visit_Template(self, node, frame=None):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
- if "." in imp:
- module, obj = imp.rsplit(".", 1)
- self.writeline("from %s import %s as %s" % (module, obj, alias))
+ if '.' in imp:
+ module, obj = imp.rsplit('.', 1)
+ self.writeline('from %s import %s as %s' %
+ (module, obj, alias))
else:
- self.writeline("import %s as %s" % (imp, alias))
+ self.writeline('import %s as %s' % (imp, alias))
# add the load name
- self.writeline("name = %r" % self.name)
+ self.writeline('name = %r' % self.name)
# generate the root render function.
- self.writeline(
- "%s(context, missing=missing%s):" % (self.func("root"), envenv), extra=1
- )
+ self.writeline('%s(context, missing=missing%s):' %
+ (self.func('root'), envenv), extra=1)
self.indent()
self.write_commons()
# process the root
frame = Frame(eval_ctx)
- if "self" in find_undeclared(node.body, ("self",)):
- ref = frame.symbols.declare_parameter("self")
- self.writeline("%s = TemplateReference(context)" % ref)
+ if 'self' in find_undeclared(node.body, ('self',)):
+ ref = frame.symbols.declare_parameter('self')
+ self.writeline('%s = TemplateReference(context)' % ref)
frame.symbols.analyze_node(node)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
if have_extends:
- self.writeline("parent_template = None")
+ self.writeline('parent_template = None')
self.enter_frame(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
@@ -779,42 +759,39 @@ def visit_Template(self, node, frame=None):
if have_extends:
if not self.has_known_extends:
self.indent()
- self.writeline("if parent_template is not None:")
+ self.writeline('if parent_template is not None:')
self.indent()
if supports_yield_from and not self.environment.is_async:
- self.writeline("yield from parent_template.root_render_func(context)")
+ self.writeline('yield from parent_template.'
+ 'root_render_func(context)')
else:
- self.writeline(
- "%sfor event in parent_template."
- "root_render_func(context):"
- % (self.environment.is_async and "async " or "")
- )
+ self.writeline('%sfor event in parent_template.'
+ 'root_render_func(context):' %
+ (self.environment.is_async and 'async ' or ''))
self.indent()
- self.writeline("yield event")
+ self.writeline('yield event')
self.outdent()
self.outdent(1 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in iteritems(self.blocks):
- self.writeline(
- "%s(context, missing=missing%s):"
- % (self.func("block_" + name), envenv),
- block,
- 1,
- )
+ self.writeline('%s(context, missing=missing%s):' %
+ (self.func('block_' + name), envenv),
+ block, 1)
self.indent()
self.write_commons()
# It's important that we do not make this frame a child of the
# toplevel template. This would cause a variety of
# interesting issues with identifier tracking.
block_frame = Frame(eval_ctx)
- undeclared = find_undeclared(block.body, ("self", "super"))
- if "self" in undeclared:
- ref = block_frame.symbols.declare_parameter("self")
- self.writeline("%s = TemplateReference(context)" % ref)
- if "super" in undeclared:
- ref = block_frame.symbols.declare_parameter("super")
- self.writeline("%s = context.super(%r, block_%s)" % (ref, name, name))
+ undeclared = find_undeclared(block.body, ('self', 'super'))
+ if 'self' in undeclared:
+ ref = block_frame.symbols.declare_parameter('self')
+ self.writeline('%s = TemplateReference(context)' % ref)
+ if 'super' in undeclared:
+ ref = block_frame.symbols.declare_parameter('super')
+ self.writeline('%s = context.super(%r, '
+ 'block_%s)' % (ref, name, name))
block_frame.symbols.analyze_node(block)
block_frame.block = name
self.enter_frame(block_frame)
@@ -823,15 +800,13 @@ def visit_Template(self, node, frame=None):
self.leave_frame(block_frame, with_python_scope=True)
self.outdent()
- self.writeline(
- "blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks),
- extra=1,
- )
+ self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
+ for x in self.blocks),
+ extra=1)
# add a function that returns the debug info
- self.writeline(
- "debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info)
- )
+ self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
+ in self.debug_info))
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
@@ -842,7 +817,7 @@ def visit_Block(self, node, frame):
if self.has_known_extends:
return
if self.extends_so_far > 0:
- self.writeline("if parent_template is None:")
+ self.writeline('if parent_template is None:')
self.indent()
level += 1
@@ -851,22 +826,16 @@ def visit_Block(self, node, frame):
else:
context = self.get_context_ref()
- if (
- supports_yield_from
- and not self.environment.is_async
- and frame.buffer is None
- ):
- self.writeline(
- "yield from context.blocks[%r][0](%s)" % (node.name, context), node
- )
+ if supports_yield_from and not self.environment.is_async and \
+ frame.buffer is None:
+ self.writeline('yield from context.blocks[%r][0](%s)' % (
+ node.name, context), node)
else:
- loop = self.environment.is_async and "async for" or "for"
- self.writeline(
- "%s event in context.blocks[%r][0](%s):" % (loop, node.name, context),
- node,
- )
+ loop = self.environment.is_async and 'async for' or 'for'
+ self.writeline('%s event in context.blocks[%r][0](%s):' % (
+ loop, node.name, context), node)
self.indent()
- self.simple_write("event", frame)
+ self.simple_write('event', frame)
self.outdent()
self.outdent(level)
@@ -874,7 +843,8 @@ def visit_Block(self, node, frame):
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
- self.fail("cannot use extend from a non top-level scope", node.lineno)
+ self.fail('cannot use extend from a non top-level scope',
+ node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
@@ -886,9 +856,10 @@ def visit_Extends(self, node, frame):
# time too, but i welcome it not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
- self.writeline("if parent_template is not None:")
+ self.writeline('if parent_template is not None:')
self.indent()
- self.writeline("raise TemplateRuntimeError(%r)" % "extended multiple times")
+ self.writeline('raise TemplateRuntimeError(%r)' %
+ 'extended multiple times')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
@@ -897,14 +868,14 @@ def visit_Extends(self, node, frame):
else:
self.outdent()
- self.writeline("parent_template = environment.get_template(", node)
+ self.writeline('parent_template = environment.get_template(', node)
self.visit(node.template, frame)
- self.write(", %r)" % self.name)
- self.writeline(
- "for name, parent_block in parent_template.blocks.%s():" % dict_item_iter
- )
+ self.write(', %r)' % self.name)
+ self.writeline('for name, parent_block in parent_template.'
+ 'blocks.%s():' % dict_item_iter)
self.indent()
- self.writeline("context.blocks.setdefault(name, []).append(parent_block)")
+ self.writeline('context.blocks.setdefault(name, []).'
+ 'append(parent_block)')
self.outdent()
# if this extends statement was in the root level we can take
@@ -919,56 +890,52 @@ def visit_Extends(self, node, frame):
def visit_Include(self, node, frame):
"""Handles includes."""
if node.ignore_missing:
- self.writeline("try:")
+ self.writeline('try:')
self.indent()
- func_name = "get_or_select_template"
+ func_name = 'get_or_select_template'
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
- func_name = "get_template"
+ func_name = 'get_template'
elif isinstance(node.template.value, (tuple, list)):
- func_name = "select_template"
+ func_name = 'select_template'
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
- func_name = "select_template"
+ func_name = 'select_template'
- self.writeline("template = environment.%s(" % func_name, node)
+ self.writeline('template = environment.%s(' % func_name, node)
self.visit(node.template, frame)
- self.write(", %r)" % self.name)
+ self.write(', %r)' % self.name)
if node.ignore_missing:
self.outdent()
- self.writeline("except TemplateNotFound:")
+ self.writeline('except TemplateNotFound:')
self.indent()
- self.writeline("pass")
+ self.writeline('pass')
self.outdent()
- self.writeline("else:")
+ self.writeline('else:')
self.indent()
skip_event_yield = False
if node.with_context:
- loop = self.environment.is_async and "async for" or "for"
- self.writeline(
- "%s event in template.root_render_func("
- "template.new_context(context.get_all(), True, "
- "%s)):" % (loop, self.dump_local_context(frame))
- )
+ loop = self.environment.is_async and 'async for' or 'for'
+ self.writeline('%s event in template.root_render_func('
+ 'template.new_context(context.get_all(), True, '
+ '%s)):' % (loop, self.dump_local_context(frame)))
elif self.environment.is_async:
- self.writeline(
- "for event in (await "
- "template._get_default_module_async())"
- "._body_stream:"
- )
+ self.writeline('for event in (await '
+ 'template._get_default_module_async())'
+ '._body_stream:')
else:
if supports_yield_from:
- self.writeline("yield from template._get_default_module()._body_stream")
+ self.writeline('yield from template._get_default_module()'
+ '._body_stream')
skip_event_yield = True
else:
- self.writeline(
- "for event in template._get_default_module()._body_stream:"
- )
+ self.writeline('for event in template._get_default_module()'
+ '._body_stream:')
if not skip_event_yield:
self.indent()
- self.simple_write("event", frame)
+ self.simple_write('event', frame)
self.outdent()
if node.ignore_missing:
@@ -976,50 +943,40 @@ def visit_Include(self, node, frame):
def visit_Import(self, node, frame):
"""Visit regular imports."""
- self.writeline("%s = " % frame.symbols.ref(node.target), node)
+ self.writeline('%s = ' % frame.symbols.ref(node.target), node)
if frame.toplevel:
- self.write("context.vars[%r] = " % node.target)
+ self.write('context.vars[%r] = ' % node.target)
if self.environment.is_async:
- self.write("await ")
- self.write("environment.get_template(")
+ self.write('await ')
+ self.write('environment.get_template(')
self.visit(node.template, frame)
- self.write(", %r)." % self.name)
+ self.write(', %r).' % self.name)
if node.with_context:
- self.write(
- "make_module%s(context.get_all(), True, %s)"
- % (
- self.environment.is_async and "_async" or "",
- self.dump_local_context(frame),
- )
- )
+ self.write('make_module%s(context.get_all(), True, %s)'
+ % (self.environment.is_async and '_async' or '',
+ self.dump_local_context(frame)))
elif self.environment.is_async:
- self.write("_get_default_module_async()")
+ self.write('_get_default_module_async()')
else:
- self.write("_get_default_module()")
- if frame.toplevel and not node.target.startswith("_"):
- self.writeline("context.exported_vars.discard(%r)" % node.target)
+ self.write('_get_default_module()')
+ if frame.toplevel and not node.target.startswith('_'):
+ self.writeline('context.exported_vars.discard(%r)' % node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
- self.write(
- "included_template = %senvironment.get_template("
- % (self.environment.is_async and "await " or "")
- )
+ self.write('included_template = %senvironment.get_template('
+ % (self.environment.is_async and 'await ' or ''))
self.visit(node.template, frame)
- self.write(", %r)." % self.name)
+ self.write(', %r).' % self.name)
if node.with_context:
- self.write(
- "make_module%s(context.get_all(), True, %s)"
- % (
- self.environment.is_async and "_async" or "",
- self.dump_local_context(frame),
- )
- )
+ self.write('make_module%s(context.get_all(), True, %s)'
+ % (self.environment.is_async and '_async' or '',
+ self.dump_local_context(frame)))
elif self.environment.is_async:
- self.write("_get_default_module_async()")
+ self.write('_get_default_module_async()')
else:
- self.write("_get_default_module()")
+ self.write('_get_default_module()')
var_names = []
discarded_names = []
@@ -1028,51 +985,41 @@ def visit_FromImport(self, node, frame):
name, alias = name
else:
alias = name
- self.writeline(
- "%s = getattr(included_template, "
- "%r, missing)" % (frame.symbols.ref(alias), name)
- )
- self.writeline("if %s is missing:" % frame.symbols.ref(alias))
+ self.writeline('%s = getattr(included_template, '
+ '%r, missing)' % (frame.symbols.ref(alias), name))
+ self.writeline('if %s is missing:' % frame.symbols.ref(alias))
self.indent()
- self.writeline(
- "%s = undefined(%r %% "
- "included_template.__name__, "
- "name=%r)"
- % (
- frame.symbols.ref(alias),
- "the template %%r (imported on %s) does "
- "not export the requested name %s"
- % (self.position(node), repr(name)),
- name,
- )
- )
+ self.writeline('%s = undefined(%r %% '
+ 'included_template.__name__, '
+ 'name=%r)' %
+ (frame.symbols.ref(alias),
+ 'the template %%r (imported on %s) does '
+ 'not export the requested name %s' % (
+ self.position(node),
+ repr(name)
+ ), name))
self.outdent()
if frame.toplevel:
var_names.append(alias)
- if not alias.startswith("_"):
+ if not alias.startswith('_'):
discarded_names.append(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
- self.writeline(
- "context.vars[%r] = %s" % (name, frame.symbols.ref(name))
- )
+ self.writeline('context.vars[%r] = %s' %
+ (name, frame.symbols.ref(name)))
else:
- self.writeline(
- "context.vars.update({%s})"
- % ", ".join(
- "%r: %s" % (name, frame.symbols.ref(name)) for name in var_names
- )
- )
+ self.writeline('context.vars.update({%s})' % ', '.join(
+ '%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
+ ))
if discarded_names:
if len(discarded_names) == 1:
- self.writeline("context.exported_vars.discard(%r)" % discarded_names[0])
+ self.writeline('context.exported_vars.discard(%r)' %
+ discarded_names[0])
else:
- self.writeline(
- "context.exported_vars.difference_"
- "update((%s))" % ", ".join(imap(repr, discarded_names))
- )
+ self.writeline('context.exported_vars.difference_'
+ 'update((%s))' % ', '.join(imap(repr, discarded_names)))
def visit_For(self, node, frame):
loop_frame = frame.inner()
@@ -1082,35 +1029,35 @@ def visit_For(self, node, frame):
# try to figure out if we have an extended loop. An extended loop
# is necessary if the loop is in recursive mode or if the special loop
# variable is accessed in the body.
- extended_loop = node.recursive or "loop" in find_undeclared(
- node.iter_child_nodes(only=("body",)), ("loop",)
- )
+ extended_loop = node.recursive or 'loop' in \
+ find_undeclared(node.iter_child_nodes(
+ only=('body',)), ('loop',))
loop_ref = None
if extended_loop:
- loop_ref = loop_frame.symbols.declare_parameter("loop")
+ loop_ref = loop_frame.symbols.declare_parameter('loop')
- loop_frame.symbols.analyze_node(node, for_branch="body")
+ loop_frame.symbols.analyze_node(node, for_branch='body')
if node.else_:
- else_frame.symbols.analyze_node(node, for_branch="else")
+ else_frame.symbols.analyze_node(node, for_branch='else')
if node.test:
loop_filter_func = self.temporary_identifier()
- test_frame.symbols.analyze_node(node, for_branch="test")
- self.writeline("%s(fiter):" % self.func(loop_filter_func), node.test)
+ test_frame.symbols.analyze_node(node, for_branch='test')
+ self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test)
self.indent()
self.enter_frame(test_frame)
- self.writeline(self.environment.is_async and "async for " or "for ")
+ self.writeline(self.environment.is_async and 'async for ' or 'for ')
self.visit(node.target, loop_frame)
- self.write(" in ")
- self.write(self.environment.is_async and "auto_aiter(fiter)" or "fiter")
- self.write(":")
+ self.write(' in ')
+ self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter')
+ self.write(':')
self.indent()
- self.writeline("if ", node.test)
+ self.writeline('if ', node.test)
self.visit(node.test, test_frame)
- self.write(":")
+ self.write(':')
self.indent()
- self.writeline("yield ")
+ self.writeline('yield ')
self.visit(node.target, loop_frame)
self.outdent(3)
self.leave_frame(test_frame, with_python_scope=True)
@@ -1119,9 +1066,8 @@ def visit_For(self, node, frame):
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if node.recursive:
- self.writeline(
- "%s(reciter, loop_render_func, depth=0):" % self.func("loop"), node
- )
+ self.writeline('%s(reciter, loop_render_func, depth=0):' %
+ self.func('loop'), node)
self.indent()
self.buffer(loop_frame)
@@ -1131,60 +1077,57 @@ def visit_For(self, node, frame):
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
- self.writeline("%s = missing" % loop_ref)
+ self.writeline('%s = missing' % loop_ref)
for name in node.find_all(nodes.Name):
- if name.ctx == "store" and name.name == "loop":
- self.fail(
- "Can't assign to special loop variable in for-loop target",
- name.lineno,
- )
+ if name.ctx == 'store' and name.name == 'loop':
+ self.fail('Can\'t assign to special loop variable '
+ 'in for-loop target', name.lineno)
if node.else_:
iteration_indicator = self.temporary_identifier()
- self.writeline("%s = 1" % iteration_indicator)
+ self.writeline('%s = 1' % iteration_indicator)
- self.writeline(self.environment.is_async and "async for " or "for ", node)
+ self.writeline(self.environment.is_async and 'async for ' or 'for ', node)
self.visit(node.target, loop_frame)
if extended_loop:
if self.environment.is_async:
- self.write(", %s in AsyncLoopContext(" % loop_ref)
+ self.write(', %s in await make_async_loop_context(' % loop_ref)
else:
- self.write(", %s in LoopContext(" % loop_ref)
+ self.write(', %s in LoopContext(' % loop_ref)
else:
- self.write(" in ")
+ self.write(' in ')
if node.test:
- self.write("%s(" % loop_filter_func)
+ self.write('%s(' % loop_filter_func)
if node.recursive:
- self.write("reciter")
+ self.write('reciter')
else:
if self.environment.is_async and not extended_loop:
- self.write("auto_aiter(")
+ self.write('auto_aiter(')
self.visit(node.iter, frame)
if self.environment.is_async and not extended_loop:
- self.write(")")
+ self.write(')')
if node.test:
- self.write(")")
+ self.write(')')
if node.recursive:
- self.write(", undefined, loop_render_func, depth):")
+ self.write(', undefined, loop_render_func, depth):')
else:
- self.write(extended_loop and ", undefined):" or ":")
+ self.write(extended_loop and ', undefined):' or ':')
self.indent()
self.enter_frame(loop_frame)
self.blockvisit(node.body, loop_frame)
if node.else_:
- self.writeline("%s = 0" % iteration_indicator)
+ self.writeline('%s = 0' % iteration_indicator)
self.outdent()
- self.leave_frame(
- loop_frame, with_python_scope=node.recursive and not node.else_
- )
+ self.leave_frame(loop_frame, with_python_scope=node.recursive
+ and not node.else_)
if node.else_:
- self.writeline("if %s:" % iteration_indicator)
+ self.writeline('if %s:' % iteration_indicator)
self.indent()
self.enter_frame(else_frame)
self.blockvisit(node.else_, else_frame)
@@ -1198,33 +1141,33 @@ def visit_For(self, node, frame):
self.outdent()
self.start_write(frame, node)
if self.environment.is_async:
- self.write("await ")
- self.write("loop(")
+ self.write('await ')
+ self.write('loop(')
if self.environment.is_async:
- self.write("auto_aiter(")
+ self.write('auto_aiter(')
self.visit(node.iter, frame)
if self.environment.is_async:
- self.write(")")
- self.write(", loop)")
+ self.write(')')
+ self.write(', loop)')
self.end_write(frame)
def visit_If(self, node, frame):
if_frame = frame.soft()
- self.writeline("if ", node)
+ self.writeline('if ', node)
self.visit(node.test, if_frame)
- self.write(":")
+ self.write(':')
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
for elif_ in node.elif_:
- self.writeline("elif ", elif_)
+ self.writeline('elif ', elif_)
self.visit(elif_.test, if_frame)
- self.write(":")
+ self.write(':')
self.indent()
self.blockvisit(elif_.body, if_frame)
self.outdent()
if node.else_:
- self.writeline("else:")
+ self.writeline('else:')
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
@@ -1233,15 +1176,16 @@ def visit_Macro(self, node, frame):
macro_frame, macro_ref = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
- if not node.name.startswith("_"):
- self.write("context.exported_vars.add(%r)" % node.name)
- self.writeline("context.vars[%r] = " % node.name)
- self.write("%s = " % frame.symbols.ref(node.name))
+ if not node.name.startswith('_'):
+ self.write('context.exported_vars.add(%r)' % node.name)
+ ref = frame.symbols.ref(node.name)
+ self.writeline('context.vars[%r] = ' % node.name)
+            self.write('%s = ' % ref)
self.macro_def(macro_ref, macro_frame)
def visit_CallBlock(self, node, frame):
call_frame, macro_ref = self.macro_body(node, frame)
- self.writeline("caller = ")
+ self.writeline('caller = ')
self.macro_def(macro_ref, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, frame, forward_caller=True)
@@ -1262,10 +1206,10 @@ def visit_With(self, node, frame):
with_frame = frame.inner()
with_frame.symbols.analyze_node(node)
self.enter_frame(with_frame)
for target, expr in izip(node.targets, node.values):
self.newline()
self.visit(target, with_frame)
- self.write(" = ")
+ self.write(' = ')
self.visit(expr, frame)
self.blockvisit(node.body, with_frame)
self.leave_frame(with_frame)
@@ -1274,187 +1218,156 @@ def visit_ExprStmt(self, node, frame):
self.newline(node)
self.visit(node.node, frame)
- _FinalizeInfo = namedtuple("_FinalizeInfo", ("const", "src"))
- #: The default finalize function if the environment isn't configured
- #: with one. Or if the environment has one, this is called on that
- #: function's output for constants.
- _default_finalize = text_type
- _finalize = None
-
- def _make_finalize(self):
- """Build the finalize function to be used on constants and at
- runtime. Cached so it's only created once for all output nodes.
-
- Returns a ``namedtuple`` with the following attributes:
-
- ``const``
- A function to finalize constant data at compile time.
-
- ``src``
- Source code to output around nodes to be evaluated at
- runtime.
- """
- if self._finalize is not None:
- return self._finalize
-
- finalize = default = self._default_finalize
- src = None
+ def visit_Output(self, node, frame):
+ # if we have a known extends statement, we don't output anything
+ # if we are in a require_output_check section
+ if self.has_known_extends and frame.require_output_check:
+ return
+ allow_constant_finalize = True
if self.environment.finalize:
- src = "environment.finalize("
- env_finalize = self.environment.finalize
-
- def finalize(value):
- return default(env_finalize(value))
-
- if getattr(env_finalize, "contextfunction", False) is True:
- src += "context, "
- finalize = None # noqa: F811
- elif getattr(env_finalize, "evalcontextfunction", False) is True:
- src += "context.eval_ctx, "
- finalize = None
- elif getattr(env_finalize, "environmentfunction", False) is True:
- src += "environment, "
-
- def finalize(value):
- return default(env_finalize(self.environment, value))
-
- self._finalize = self._FinalizeInfo(finalize, src)
- return self._finalize
-
- def _output_const_repr(self, group):
- """Given a group of constant values converted from ``Output``
- child nodes, produce a string to write to the template module
- source.
- """
- return repr(concat(group))
-
- def _output_child_to_const(self, node, frame, finalize):
- """Try to optimize a child of an ``Output`` node by trying to
- convert it to constant, finalized data at compile time.
-
- If :exc:`Impossible` is raised, the node is not constant and
- will be evaluated at runtime. Any other exception will also be
- evaluated at runtime for easier debugging.
- """
- const = node.as_const(frame.eval_ctx)
-
- if frame.eval_ctx.autoescape:
- const = escape(const)
-
- # Template data doesn't go through finalize.
- if isinstance(node, nodes.TemplateData):
- return text_type(const)
-
- return finalize.const(const)
-
- def _output_child_pre(self, node, frame, finalize):
- """Output extra source code before visiting a child of an
- ``Output`` node.
- """
- if frame.eval_ctx.volatile:
- self.write("(escape if context.eval_ctx.autoescape else to_string)(")
- elif frame.eval_ctx.autoescape:
- self.write("escape(")
+ func = self.environment.finalize
+ if getattr(func, 'contextfunction', False) or \
+ getattr(func, 'evalcontextfunction', False):
+ allow_constant_finalize = False
+ elif getattr(func, 'environmentfunction', False):
+ finalize = lambda x: text_type(
+ self.environment.finalize(self.environment, x))
+ else:
+ finalize = lambda x: text_type(self.environment.finalize(x))
else:
- self.write("to_string(")
-
- if finalize.src is not None:
- self.write(finalize.src)
-
- def _output_child_post(self, node, frame, finalize):
- """Output extra source code after visiting a child of an
- ``Output`` node.
- """
- self.write(")")
-
- if finalize.src is not None:
- self.write(")")
+ finalize = text_type
- def visit_Output(self, node, frame):
- # If an extends is active, don't render outside a block.
+        # if we are inside a frame that requires an output check, emit it now
+ outdent_later = False
if frame.require_output_check:
- # A top-level extends is known to exist at compile time.
- if self.has_known_extends:
- return
-
- self.writeline("if parent_template is None:")
+ self.writeline('if parent_template is None:')
self.indent()
+ outdent_later = True
- finalize = self._make_finalize()
+ # try to evaluate as many chunks as possible into a static
+ # string at compile time.
body = []
-
- # Evaluate constants at compile time if possible. Each item in
- # body will be either a list of static data or a node to be
- # evaluated at runtime.
for child in node.nodes:
try:
- if not (
- # If the finalize function requires runtime context,
- # constants can't be evaluated at compile time.
- finalize.const
- # Unless it's basic template data that won't be
- # finalized anyway.
- or isinstance(child, nodes.TemplateData)
- ):
+ if not allow_constant_finalize:
raise nodes.Impossible()
-
- const = self._output_child_to_const(child, frame, finalize)
- except (nodes.Impossible, Exception):
- # The node was not constant and needs to be evaluated at
- # runtime. Or another error was raised, which is easier
- # to debug at runtime.
+ const = child.as_const(frame.eval_ctx)
+ except nodes.Impossible:
+ body.append(child)
+ continue
+            # the frame can't be volatile here, because otherwise the
+ # as_const() function would raise an Impossible exception
+ # at that point.
+ try:
+ if frame.eval_ctx.autoescape:
+ if hasattr(const, '__html__'):
+ const = const.__html__()
+ else:
+ const = escape(const)
+ const = finalize(const)
+ except Exception:
+ # if something goes wrong here we evaluate the node
+ # at runtime for easier debugging
body.append(child)
continue
-
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
- if frame.buffer is not None:
- if len(body) == 1:
- self.writeline("%s.append(" % frame.buffer)
- else:
- self.writeline("%s.extend((" % frame.buffer)
-
- self.indent()
-
- for item in body:
- if isinstance(item, list):
- # A group of constant data to join and output.
- val = self._output_const_repr(item)
-
- if frame.buffer is None:
- self.writeline("yield " + val)
+ # if we have less than 3 nodes or a buffer we yield or extend/append
+ if len(body) < 3 or frame.buffer is not None:
+ if frame.buffer is not None:
+ # for one item we append, for more we extend
+ if len(body) == 1:
+ self.writeline('%s.append(' % frame.buffer)
else:
- self.writeline(val + ",")
- else:
- if frame.buffer is None:
- self.writeline("yield ", item)
+ self.writeline('%s.extend((' % frame.buffer)
+ self.indent()
+ for item in body:
+ if isinstance(item, list):
+ val = repr(concat(item))
+ if frame.buffer is None:
+ self.writeline('yield ' + val)
+ else:
+ self.writeline(val + ',')
else:
- self.newline(item)
-
- # A node to be evaluated at runtime.
- self._output_child_pre(item, frame, finalize)
- self.visit(item, frame)
- self._output_child_post(item, frame, finalize)
-
- if frame.buffer is not None:
- self.write(",")
+ if frame.buffer is None:
+ self.writeline('yield ', item)
+ else:
+ self.newline(item)
+ close = 1
+ if frame.eval_ctx.volatile:
+ self.write('(escape if context.eval_ctx.autoescape'
+ ' else to_string)(')
+ elif frame.eval_ctx.autoescape:
+ self.write('escape(')
+ else:
+ self.write('to_string(')
+ if self.environment.finalize is not None:
+ self.write('environment.finalize(')
+ if getattr(self.environment.finalize,
+ "contextfunction", False):
+ self.write('context, ')
+ close += 1
+ self.visit(item, frame)
+ self.write(')' * close)
+ if frame.buffer is not None:
+ self.write(',')
+ if frame.buffer is not None:
+ # close the open parentheses
+ self.outdent()
+ self.writeline(len(body) == 1 and ')' or '))')
- if frame.buffer is not None:
+ # otherwise we create a format string as this is faster in that case
+ else:
+ format = []
+ arguments = []
+ for item in body:
+ if isinstance(item, list):
+ format.append(concat(item).replace('%', '%%'))
+ else:
+ format.append('%s')
+ arguments.append(item)
+ self.writeline('yield ')
+ self.write(repr(concat(format)) + ' % (')
+ self.indent()
+ for argument in arguments:
+ self.newline(argument)
+ close = 0
+ if frame.eval_ctx.volatile:
+ self.write('(escape if context.eval_ctx.autoescape else'
+ ' to_string)(')
+ close += 1
+ elif frame.eval_ctx.autoescape:
+ self.write('escape(')
+ close += 1
+ if self.environment.finalize is not None:
+ self.write('environment.finalize(')
+ if getattr(self.environment.finalize,
+ 'contextfunction', False):
+ self.write('context, ')
+ elif getattr(self.environment.finalize,
+ 'evalcontextfunction', False):
+ self.write('context.eval_ctx, ')
+ elif getattr(self.environment.finalize,
+ 'environmentfunction', False):
+ self.write('environment, ')
+ close += 1
+ self.visit(argument, frame)
+ self.write(')' * close + ', ')
self.outdent()
- self.writeline(")" if len(body) == 1 else "))")
+ self.writeline(')')
- if frame.require_output_check:
+ if outdent_later:
self.outdent()
def visit_Assign(self, node, frame):
self.push_assign_tracking()
self.newline(node)
self.visit(node.target, frame)
- self.write(" = ")
+ self.write(' = ')
self.visit(node.node, frame)
self.pop_assign_tracking(frame)
@@ -1471,19 +1384,20 @@ def visit_AssignBlock(self, node, frame):
self.blockvisit(node.body, block_frame)
self.newline(node)
self.visit(node.target, frame)
- self.write(" = (Markup if context.eval_ctx.autoescape else identity)(")
+ self.write(' = (Markup if context.eval_ctx.autoescape '
+ 'else identity)(')
if node.filter is not None:
self.visit_Filter(node.filter, block_frame)
else:
- self.write("concat(%s)" % block_frame.buffer)
- self.write(")")
+ self.write('concat(%s)' % block_frame.buffer)
+ self.write(')')
self.pop_assign_tracking(frame)
self.leave_frame(block_frame)
# -- Expression Visitors
def visit_Name(self, node, frame):
- if node.ctx == "store" and frame.toplevel:
+ if node.ctx == 'store' and frame.toplevel:
if self._assign_stack:
self._assign_stack[-1].add(node.name)
ref = frame.symbols.ref(node.name)
@@ -1491,17 +1405,12 @@ def visit_Name(self, node, frame):
# If we are looking up a variable we might have to deal with the
# case where it's undefined. We can skip that case if the load
# instruction indicates a parameter which are always defined.
- if node.ctx == "load":
+ if node.ctx == 'load':
load = frame.symbols.find_load(ref)
- if not (
- load is not None
- and load[0] == VAR_LOAD_PARAMETER
- and not self.parameter_is_undeclared(ref)
- ):
- self.write(
- "(undefined(name=%r) if %s is missing else %s)"
- % (node.name, ref, ref)
- )
+            if not (load is not None and load[0] == VAR_LOAD_PARAMETER and
+ not self.parameter_is_undeclared(ref)):
+ self.write('(undefined(name=%r) if %s is missing else %s)' %
+ (node.name, ref, ref))
return
self.write(ref)
@@ -1511,14 +1420,12 @@ def visit_NSRef(self, node, frame):
# `foo.bar` notation they will be parsed as a normal attribute access
# when used anywhere but in a `set` context
ref = frame.symbols.ref(node.name)
- self.writeline("if not isinstance(%s, Namespace):" % ref)
+ self.writeline('if not isinstance(%s, Namespace):' % ref)
self.indent()
- self.writeline(
- "raise TemplateRuntimeError(%r)"
- % "cannot assign attribute on non-namespace object"
- )
+ self.writeline('raise TemplateRuntimeError(%r)' %
+ 'cannot assign attribute on non-namespace object')
self.outdent()
- self.writeline("%s[%r]" % (ref, node.attr))
+ self.writeline('%s[%r]' % (ref, node.attr))
def visit_Const(self, node, frame):
val = node.as_const(frame.eval_ctx)
@@ -1531,256 +1438,230 @@ def visit_TemplateData(self, node, frame):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
- self.write(
- "(Markup if context.eval_ctx.autoescape else identity)(%r)" % node.data
- )
+ self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)'
+ % node.data)
def visit_Tuple(self, node, frame):
- self.write("(")
+ self.write('(')
idx = -1
for idx, item in enumerate(node.items):
if idx:
- self.write(", ")
+ self.write(', ')
self.visit(item, frame)
- self.write(idx == 0 and ",)" or ")")
+ self.write(idx == 0 and ',)' or ')')
def visit_List(self, node, frame):
- self.write("[")
+ self.write('[')
for idx, item in enumerate(node.items):
if idx:
- self.write(", ")
+ self.write(', ')
self.visit(item, frame)
- self.write("]")
+ self.write(']')
def visit_Dict(self, node, frame):
- self.write("{")
+ self.write('{')
for idx, item in enumerate(node.items):
if idx:
- self.write(", ")
+ self.write(', ')
self.visit(item.key, frame)
- self.write(": ")
+ self.write(': ')
self.visit(item.value, frame)
- self.write("}")
+ self.write('}')
- def binop(operator, interceptable=True): # noqa: B902
+ def binop(operator, interceptable=True):
@optimizeconst
def visitor(self, node, frame):
- if (
- self.environment.sandboxed
- and operator in self.environment.intercepted_binops
- ):
- self.write("environment.call_binop(context, %r, " % operator)
+ if self.environment.sandboxed and \
+ operator in self.environment.intercepted_binops:
+ self.write('environment.call_binop(context, %r, ' % operator)
self.visit(node.left, frame)
- self.write(", ")
+ self.write(', ')
self.visit(node.right, frame)
else:
- self.write("(")
+ self.write('(')
self.visit(node.left, frame)
- self.write(" %s " % operator)
+ self.write(' %s ' % operator)
self.visit(node.right, frame)
- self.write(")")
-
+ self.write(')')
return visitor
- def uaop(operator, interceptable=True): # noqa: B902
+ def uaop(operator, interceptable=True):
@optimizeconst
def visitor(self, node, frame):
- if (
- self.environment.sandboxed
- and operator in self.environment.intercepted_unops
- ):
- self.write("environment.call_unop(context, %r, " % operator)
+ if self.environment.sandboxed and \
+ operator in self.environment.intercepted_unops:
+ self.write('environment.call_unop(context, %r, ' % operator)
self.visit(node.node, frame)
else:
- self.write("(" + operator)
+ self.write('(' + operator)
self.visit(node.node, frame)
- self.write(")")
-
+ self.write(')')
return visitor
- visit_Add = binop("+")
- visit_Sub = binop("-")
- visit_Mul = binop("*")
- visit_Div = binop("/")
- visit_FloorDiv = binop("//")
- visit_Pow = binop("**")
- visit_Mod = binop("%")
- visit_And = binop("and", interceptable=False)
- visit_Or = binop("or", interceptable=False)
- visit_Pos = uaop("+")
- visit_Neg = uaop("-")
- visit_Not = uaop("not ", interceptable=False)
+ visit_Add = binop('+')
+ visit_Sub = binop('-')
+ visit_Mul = binop('*')
+ visit_Div = binop('/')
+ visit_FloorDiv = binop('//')
+ visit_Pow = binop('**')
+ visit_Mod = binop('%')
+ visit_And = binop('and', interceptable=False)
+ visit_Or = binop('or', interceptable=False)
+ visit_Pos = uaop('+')
+ visit_Neg = uaop('-')
+ visit_Not = uaop('not ', interceptable=False)
del binop, uaop
@optimizeconst
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
- func_name = "(context.eval_ctx.volatile and markup_join or unicode_join)"
+ func_name = '(context.eval_ctx.volatile and' \
+ ' markup_join or unicode_join)'
elif frame.eval_ctx.autoescape:
- func_name = "markup_join"
+ func_name = 'markup_join'
else:
- func_name = "unicode_join"
- self.write("%s((" % func_name)
+ func_name = 'unicode_join'
+ self.write('%s((' % func_name)
for arg in node.nodes:
self.visit(arg, frame)
- self.write(", ")
- self.write("))")
+ self.write(', ')
+ self.write('))')
@optimizeconst
def visit_Compare(self, node, frame):
- self.write("(")
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
- self.write(")")
def visit_Operand(self, node, frame):
- self.write(" %s " % operators[node.op])
+ self.write(' %s ' % operators[node.op])
self.visit(node.expr, frame)
@optimizeconst
def visit_Getattr(self, node, frame):
- if self.environment.is_async:
- self.write("(await auto_await(")
-
- self.write("environment.getattr(")
+ self.write('environment.getattr(')
self.visit(node.node, frame)
- self.write(", %r)" % node.attr)
-
- if self.environment.is_async:
- self.write("))")
+ self.write(', %r)' % node.attr)
@optimizeconst
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
- self.write("[")
+ self.write('[')
self.visit(node.arg, frame)
- self.write("]")
+ self.write(']')
else:
- if self.environment.is_async:
- self.write("(await auto_await(")
-
- self.write("environment.getitem(")
+ self.write('environment.getitem(')
self.visit(node.node, frame)
- self.write(", ")
+ self.write(', ')
self.visit(node.arg, frame)
- self.write(")")
-
- if self.environment.is_async:
- self.write("))")
+ self.write(')')
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
- self.write(":")
+ self.write(':')
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
- self.write(":")
+ self.write(':')
self.visit(node.step, frame)
@optimizeconst
def visit_Filter(self, node, frame):
if self.environment.is_async:
- self.write("await auto_await(")
- self.write(self.filters[node.name] + "(")
+ self.write('await auto_await(')
+ self.write(self.filters[node.name] + '(')
func = self.environment.filters.get(node.name)
if func is None:
- self.fail("no filter named %r" % node.name, node.lineno)
- if getattr(func, "contextfilter", False) is True:
- self.write("context, ")
- elif getattr(func, "evalcontextfilter", False) is True:
- self.write("context.eval_ctx, ")
- elif getattr(func, "environmentfilter", False) is True:
- self.write("environment, ")
+ self.fail('no filter named %r' % node.name, node.lineno)
+ if getattr(func, 'contextfilter', False):
+ self.write('context, ')
+ elif getattr(func, 'evalcontextfilter', False):
+ self.write('context.eval_ctx, ')
+ elif getattr(func, 'environmentfilter', False):
+ self.write('environment, ')
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
- self.write(
- "(context.eval_ctx.autoescape and"
- " Markup(concat(%s)) or concat(%s))" % (frame.buffer, frame.buffer)
- )
+ self.write('(context.eval_ctx.autoescape and'
+ ' Markup(concat(%s)) or concat(%s))' %
+ (frame.buffer, frame.buffer))
elif frame.eval_ctx.autoescape:
- self.write("Markup(concat(%s))" % frame.buffer)
+ self.write('Markup(concat(%s))' % frame.buffer)
else:
- self.write("concat(%s)" % frame.buffer)
+ self.write('concat(%s)' % frame.buffer)
self.signature(node, frame)
- self.write(")")
+ self.write(')')
if self.environment.is_async:
- self.write(")")
+ self.write(')')
@optimizeconst
def visit_Test(self, node, frame):
- self.write(self.tests[node.name] + "(")
+ self.write(self.tests[node.name] + '(')
if node.name not in self.environment.tests:
- self.fail("no test named %r" % node.name, node.lineno)
+ self.fail('no test named %r' % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
- self.write(")")
+ self.write(')')
@optimizeconst
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
- self.write(
- "cond_expr_undefined(%r)"
- % (
- "the inline if-"
- "expression on %s evaluated to false and "
- "no else section was defined." % self.position(node)
- )
- )
-
- self.write("(")
+ self.write('undefined(%r)' % ('the inline if-'
+ 'expression on %s evaluated to false and '
+ 'no else section was defined.' % self.position(node)))
+
+ self.write('(')
self.visit(node.expr1, frame)
- self.write(" if ")
+ self.write(' if ')
self.visit(node.test, frame)
- self.write(" else ")
+ self.write(' else ')
write_expr2()
- self.write(")")
+ self.write(')')
@optimizeconst
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.is_async:
- self.write("await auto_await(")
+ self.write('await auto_await(')
if self.environment.sandboxed:
- self.write("environment.call(context, ")
+ self.write('environment.call(context, ')
else:
- self.write("context.call(")
+ self.write('context.call(')
self.visit(node.node, frame)
- extra_kwargs = forward_caller and {"caller": "caller"} or None
+ extra_kwargs = forward_caller and {'caller': 'caller'} or None
self.signature(node, frame, extra_kwargs)
- self.write(")")
+ self.write(')')
if self.environment.is_async:
- self.write(")")
+ self.write(')')
def visit_Keyword(self, node, frame):
- self.write(node.key + "=")
+ self.write(node.key + '=')
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
- self.write("Markup(")
+ self.write('Markup(')
self.visit(node.expr, frame)
- self.write(")")
+ self.write(')')
def visit_MarkSafeIfAutoescape(self, node, frame):
- self.write("(context.eval_ctx.autoescape and Markup or identity)(")
+ self.write('(context.eval_ctx.autoescape and Markup or identity)(')
self.visit(node.expr, frame)
- self.write(")")
+ self.write(')')
def visit_EnvironmentAttribute(self, node, frame):
- self.write("environment." + node.name)
+ self.write('environment.' + node.name)
def visit_ExtensionAttribute(self, node, frame):
- self.write("environment.extensions[%r].%s" % (node.identifier, node.name))
+ self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
@@ -1789,16 +1670,13 @@ def visit_InternalName(self, node, frame):
self.write(node.name)
def visit_ContextReference(self, node, frame):
- self.write("context")
-
- def visit_DerivedContextReference(self, node, frame):
- self.write(self.derive_context(frame))
+ self.write('context')
def visit_Continue(self, node, frame):
- self.writeline("continue", node)
+ self.writeline('continue', node)
def visit_Break(self, node, frame):
- self.writeline("break", node)
+ self.writeline('break', node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
@@ -1809,8 +1687,8 @@ def visit_Scope(self, node, frame):
def visit_OverlayScope(self, node, frame):
ctx = self.temporary_identifier()
- self.writeline("%s = %s" % (ctx, self.derive_context(frame)))
- self.writeline("%s.vars = " % ctx)
+ self.writeline('%s = %s' % (ctx, self.derive_context(frame)))
+ self.writeline('%s.vars = ' % ctx)
self.visit(node.context, frame)
self.push_context_reference(ctx)
@@ -1823,7 +1701,7 @@ def visit_OverlayScope(self, node, frame):
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
- self.writeline("context.eval_ctx.%s = " % keyword.key)
+ self.writeline('context.eval_ctx.%s = ' % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
@@ -1835,9 +1713,9 @@ def visit_EvalContextModifier(self, node, frame):
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
saved_ctx = frame.eval_ctx.save()
- self.writeline("%s = context.eval_ctx.save()" % old_ctx_name)
+ self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(saved_ctx)
- self.writeline("context.eval_ctx.revert(%s)" % old_ctx_name)
+ self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
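
Note on the CondExpr hunk above: with the downgraded codegen, an inline `if` with no `else` falls back to an `undefined(...)` call at render time, which renders as an empty string by default. A minimal sketch of the observable behavior, assuming a stock jinja2 install (the template strings are illustrative, not from this TA):

```python
from jinja2 import Environment

env = Environment()
t = env.from_string("{{ 'yes' if flag }}")
print(t.render(flag=True))   # -> yes
print(t.render(flag=False))  # -> '' (undefined renders as empty string)
```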
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/constants.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/constants.py
old mode 100644
new mode 100755
index bf7f2ca7..11efd1ed
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/constants.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/constants.py
@@ -1,6 +1,17 @@
# -*- coding: utf-8 -*-
+"""
+ jinja.constants
+ ~~~~~~~~~~~~~~~
+
+ Various constants.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+
+
#: list of lorem ipsum words used by the lipsum() helper function
-LOREM_IPSUM_WORDS = u"""\
+LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
@@ -18,4 +29,4 @@
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
-viverra volutpat vulputate"""
+viverra volutpat vulputate'''
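
The LOREM_IPSUM_WORDS constant above only feeds the `lipsum()` helper exposed in the default template namespace; a minimal sketch of that helper in use, assuming a stock jinja2 install:

```python
from jinja2 import Environment

# lipsum() draws from LOREM_IPSUM_WORDS to generate filler text.
env = Environment()
out = env.from_string("{{ lipsum(n=1, html=False) }}").render()
print(out[:60])  # first few generated lorem ipsum words
```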
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/debug.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/debug.py
old mode 100644
new mode 100755
index 5d8aec31..b61139f0
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/debug.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/debug.py
@@ -1,268 +1,372 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.debug
+ ~~~~~~~~~~~~
+
+ Implements the debug interface for Jinja. This module does some pretty
+ ugly stuff with the Python traceback system in order to achieve tracebacks
+ with correct line numbers, locals and contents.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
import sys
-from types import CodeType
+import traceback
+from types import TracebackType, CodeType
+from jinja2.utils import missing, internal_code
+from jinja2.exceptions import TemplateSyntaxError
+from jinja2._compat import iteritems, reraise, PY2
-from . import TemplateSyntaxError
-from ._compat import PYPY
-from .utils import internal_code
-from .utils import missing
+# on pypy we can take advantage of transparent proxies
+try:
+ from __pypy__ import tproxy
+except ImportError:
+ tproxy = None
-def rewrite_traceback_stack(source=None):
- """Rewrite the current exception to replace any tracebacks from
- within compiled template code with tracebacks that look like they
- came from the template source.
+# what does the raise helper look like?
+try:
+ exec("raise TypeError, 'foo'")
+except SyntaxError:
+ raise_helper = 'raise __jinja_exception__[1]'
+except TypeError:
+ raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
- This must be called within an ``except`` block.
- :param exc_info: A :meth:`sys.exc_info` tuple. If not provided,
- the current ``exc_info`` is used.
- :param source: For ``TemplateSyntaxError``, the original source if
- known.
- :return: A :meth:`sys.exc_info` tuple that can be re-raised.
- """
- exc_type, exc_value, tb = sys.exc_info()
+class TracebackFrameProxy(object):
+ """Proxies a traceback frame."""
- if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
- exc_value.translated = True
- exc_value.source = source
+ def __init__(self, tb):
+ self.tb = tb
+ self._tb_next = None
- try:
- # Remove the old traceback on Python 3, otherwise the frames
- # from the compiler still show up.
- exc_value.with_traceback(None)
- except AttributeError:
- pass
+ @property
+ def tb_next(self):
+ return self._tb_next
- # Outside of runtime, so the frame isn't executing template
- # code, but it still needs to point at the template.
- tb = fake_traceback(
- exc_value, None, exc_value.filename or "", exc_value.lineno
+ def set_next(self, next):
+ if tb_set_next is not None:
+ try:
+ tb_set_next(self.tb, next and next.tb or None)
+ except Exception:
+            # this function can fail due to all the hackery it does
+            # on various python implementations. We just catch and
+            # ignore any errors it raises.
+ pass
+ self._tb_next = next
+
+ @property
+ def is_jinja_frame(self):
+ return '__jinja_template__' in self.tb.tb_frame.f_globals
+
+ def __getattr__(self, name):
+ return getattr(self.tb, name)
+
+
+def make_frame_proxy(frame):
+ proxy = TracebackFrameProxy(frame)
+ if tproxy is None:
+ return proxy
+ def operation_handler(operation, *args, **kwargs):
+ if operation in ('__getattribute__', '__getattr__'):
+ return getattr(proxy, args[0])
+ elif operation == '__setattr__':
+ proxy.__setattr__(*args, **kwargs)
+ else:
+ return getattr(proxy, operation)(*args, **kwargs)
+ return tproxy(TracebackType, operation_handler)
+
+
+class ProcessedTraceback(object):
+ """Holds a Jinja preprocessed traceback for printing or reraising."""
+
+ def __init__(self, exc_type, exc_value, frames):
+ assert frames, 'no frames for this traceback?'
+ self.exc_type = exc_type
+ self.exc_value = exc_value
+ self.frames = frames
+
+        # re-concatenate the frames (which are proxies)
+ prev_tb = None
+ for tb in self.frames:
+ if prev_tb is not None:
+ prev_tb.set_next(tb)
+ prev_tb = tb
+ prev_tb.set_next(None)
+
+ def render_as_text(self, limit=None):
+ """Return a string with the traceback."""
+ lines = traceback.format_exception(self.exc_type, self.exc_value,
+ self.frames[0], limit=limit)
+ return ''.join(lines).rstrip()
+
+ def render_as_html(self, full=False):
+ """Return a unicode string with the traceback as rendered HTML."""
+ from jinja2.debugrenderer import render_traceback
+        return u'%s\n\n<!--\n%s\n-->' % (
+ render_traceback(self, full=full),
+ self.render_as_text().decode('utf-8', 'replace')
)
+
+ @property
+ def is_template_syntax_error(self):
+ """`True` if this is a template syntax error."""
+ return isinstance(self.exc_value, TemplateSyntaxError)
+
+ @property
+ def exc_info(self):
+ """Exception info tuple with a proxy around the frame objects."""
+ return self.exc_type, self.exc_value, self.frames[0]
+
+ @property
+ def standard_exc_info(self):
+ """Standard python exc_info for re-raising"""
+ tb = self.frames[0]
+ # the frame will be an actual traceback (or transparent proxy) if
+ # we are on pypy or a python implementation with support for tproxy
+ if type(tb) is not TracebackType:
+ tb = tb.tb
+ return self.exc_type, self.exc_value, tb
+
+
+def make_traceback(exc_info, source_hint=None):
+ """Creates a processed traceback object from the exc_info."""
+ exc_type, exc_value, tb = exc_info
+ if isinstance(exc_value, TemplateSyntaxError):
+ exc_info = translate_syntax_error(exc_value, source_hint)
+ initial_skip = 0
else:
- # Skip the frame for the render function.
- tb = tb.tb_next
+ initial_skip = 1
+ return translate_exception(exc_info, initial_skip)
+
+
+def translate_syntax_error(error, source=None):
+ """Rewrites a syntax error to please traceback systems."""
+ error.source = source
+ error.translated = True
+ exc_info = (error.__class__, error, None)
+ filename = error.filename
+ if filename is None:
+ filename = ''
+ return fake_exc_info(exc_info, filename, error.lineno)
- stack = []
- # Build the stack of traceback object, replacing any in template
- # code with the source file and line information.
+def translate_exception(exc_info, initial_skip=0):
+ """If passed an exc_info it will automatically rewrite the exceptions
+ all the way down to the correct line numbers and frames.
+ """
+ tb = exc_info[2]
+ frames = []
+
+ # skip some internal frames if wanted
+ for x in range(initial_skip):
+ if tb is not None:
+ tb = tb.tb_next
+ initial_tb = tb
+
while tb is not None:
- # Skip frames decorated with @internalcode. These are internal
- # calls that aren't useful in template debugging output.
+ # skip frames decorated with @internalcode. These are internal
+ # calls we can't avoid and that are useless in template debugging
+ # output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
- template = tb.tb_frame.f_globals.get("__jinja_template__")
+ # save a reference to the next frame if we override the current
+ # one with a faked one.
+ next = tb.tb_next
+ # fake template exceptions
+ template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
- fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
- stack.append(fake_tb)
- else:
- stack.append(tb)
-
- tb = tb.tb_next
-
- tb_next = None
-
- # Assign tb_next in reverse to avoid circular references.
- for tb in reversed(stack):
- tb_next = tb_set_next(tb, tb_next)
-
- return exc_type, exc_value, tb_next
-
+ tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
+ lineno)[2]
-def fake_traceback(exc_value, tb, filename, lineno):
- """Produce a new traceback object that looks like it came from the
- template source instead of the compiled code. The filename, line
- number, and location name will point to the template, and the local
- variables will be the current template context.
-
- :param exc_value: The original exception to be re-raised to create
- the new traceback.
- :param tb: The original traceback to get the local variables and
- code info from.
- :param filename: The template filename.
- :param lineno: The line number in the template source.
- """
- if tb is not None:
- # Replace the real locals with the context that would be
- # available at that point in the template.
- locals = get_template_locals(tb.tb_frame.f_locals)
- locals.pop("__jinja_exception__", None)
- else:
- locals = {}
-
- globals = {
- "__name__": filename,
- "__file__": filename,
- "__jinja_exception__": exc_value,
- }
- # Raise an exception at the correct line number.
- code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec")
-
- # Build a new code object that points to the template file and
- # replaces the location with a block name.
- try:
- location = "template"
-
- if tb is not None:
- function = tb.tb_frame.f_code.co_name
-
- if function == "root":
- location = "top-level template code"
- elif function.startswith("block_"):
- location = 'block "%s"' % function[6:]
-
- # Collect arguments for the new code object. CodeType only
- # accepts positional arguments, and arguments were inserted in
- # new Python versions.
- code_args = []
-
- for attr in (
- "argcount",
- "posonlyargcount", # Python 3.8
- "kwonlyargcount", # Python 3
- "nlocals",
- "stacksize",
- "flags",
- "code", # codestring
- "consts", # constants
- "names",
- "varnames",
- ("filename", filename),
- ("name", location),
- "firstlineno",
- "lnotab",
- "freevars",
- "cellvars",
- ):
- if isinstance(attr, tuple):
- # Replace with given value.
- code_args.append(attr[1])
- continue
-
- try:
- # Copy original value if it exists.
- code_args.append(getattr(code, "co_" + attr))
- except AttributeError:
- # Some arguments were added later.
- continue
-
- code = CodeType(*code_args)
- except Exception:
- # Some environments such as Google App Engine don't support
- # modifying code objects.
- pass
+ frames.append(make_frame_proxy(tb))
+ tb = next
- # Execute the new code, which is guaranteed to raise, and return
- # the new traceback without this frame.
- try:
- exec(code, globals, locals)
- except BaseException:
- return sys.exc_info()[2].tb_next
+ # if we don't have any exceptions in the frames left, we have to
+ # reraise it unchanged.
+ # XXX: can we backup here? when could this happen?
+ if not frames:
+ reraise(exc_info[0], exc_info[1], exc_info[2])
+ return ProcessedTraceback(exc_info[0], exc_info[1], frames)
-def get_template_locals(real_locals):
- """Based on the runtime locals, get the context that would be
- available at that point in the template.
- """
- # Start with the current template context.
- ctx = real_locals.get("context")
+def get_jinja_locals(real_locals):
+ ctx = real_locals.get('context')
if ctx:
- data = ctx.get_all().copy()
+ locals = ctx.get_all().copy()
else:
- data = {}
+ locals = {}
- # Might be in a derived context that only sets local variables
- # rather than pushing a context. Local variables follow the scheme
- # l_depth_name. Find the highest-depth local that has a value for
- # each name.
local_overrides = {}
- for name, value in real_locals.items():
- if not name.startswith("l_") or value is missing:
- # Not a template variable, or no longer relevant.
+ for name, value in iteritems(real_locals):
+ if not name.startswith('l_') or value is missing:
continue
-
try:
- _, depth, name = name.split("_", 2)
+ _, depth, name = name.split('_', 2)
depth = int(depth)
except ValueError:
continue
-
cur_depth = local_overrides.get(name, (-1,))[0]
-
if cur_depth < depth:
local_overrides[name] = (depth, value)
- # Modify the context with any derived context.
- for name, (_, value) in local_overrides.items():
+ for name, (_, value) in iteritems(local_overrides):
if value is missing:
- data.pop(name, None)
+ locals.pop(name, None)
else:
- data[name] = value
+ locals[name] = value
- return data
+ return locals
-if sys.version_info >= (3, 7):
- # tb_next is directly assignable as of Python 3.7
- def tb_set_next(tb, tb_next):
- tb.tb_next = tb_next
- return tb
+def fake_exc_info(exc_info, filename, lineno):
+ """Helper for `translate_exception`."""
+ exc_type, exc_value, tb = exc_info
+ # figure the real context out
+ if tb is not None:
+ locals = get_jinja_locals(tb.tb_frame.f_locals)
-elif PYPY:
- # PyPy might have special support, and won't work with ctypes.
- try:
- import tputil
- except ImportError:
- # Without tproxy support, use the original traceback.
- def tb_set_next(tb, tb_next):
- return tb
-
+ # if there is a local called __jinja_exception__, we get
+ # rid of it to not break the debug functionality.
+ locals.pop('__jinja_exception__', None)
else:
- # With tproxy support, create a proxy around the traceback that
- # returns the new tb_next.
- def tb_set_next(tb, tb_next):
- def controller(op):
- if op.opname == "__getattribute__" and op.args[0] == "tb_next":
- return tb_next
+ locals = {}
- return op.delegate()
+    # assemble fake globals we need
+ globals = {
+ '__name__': filename,
+ '__file__': filename,
+ '__jinja_exception__': exc_info[:2],
+
+ # we don't want to keep the reference to the template around
+ # to not cause circular dependencies, but we mark it as Jinja
+ # frame for the ProcessedTraceback
+ '__jinja_template__': None
+ }
- return tputil.make_proxy(controller, obj=tb)
+ # and fake the exception
+ code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
+ # if it's possible, change the name of the code. This won't work
+ # on some python environments such as google appengine
+ try:
+ if tb is None:
+ location = 'template'
+ else:
+ function = tb.tb_frame.f_code.co_name
+ if function == 'root':
+ location = 'top-level template code'
+ elif function.startswith('block_'):
+ location = 'block "%s"' % function[6:]
+ else:
+ location = 'template'
+
+ if PY2:
+ code = CodeType(0, code.co_nlocals, code.co_stacksize,
+ code.co_flags, code.co_code, code.co_consts,
+ code.co_names, code.co_varnames, filename,
+ location, code.co_firstlineno,
+ code.co_lnotab, (), ())
+ else:
+ code = CodeType(0, code.co_kwonlyargcount,
+ code.co_nlocals, code.co_stacksize,
+ code.co_flags, code.co_code, code.co_consts,
+ code.co_names, code.co_varnames, filename,
+ location, code.co_firstlineno,
+ code.co_lnotab, (), ())
+ except Exception as e:
+ pass
+
+ # execute the code and catch the new traceback
+ try:
+ exec(code, globals, locals)
+ except:
+ exc_info = sys.exc_info()
+ new_tb = exc_info[2].tb_next
+
+ # return without this frame
+ return exc_info[:2] + (new_tb,)
-else:
- # Use ctypes to assign tb_next at the C level since it's read-only
- # from Python.
+
+def _init_ugly_crap():
+ """This function implements a few ugly things so that we can patch the
+ traceback objects. The function returned allows resetting `tb_next` on
+    any python traceback object. Do not attempt to use this on non-CPython
+    interpreters.
+ """
import ctypes
+ from types import TracebackType
- class _CTraceback(ctypes.Structure):
- _fields_ = [
- # Extra PyObject slots when compiled with Py_TRACE_REFS.
- ("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()),
- # Only care about tb_next as an object, not a traceback.
- ("tb_next", ctypes.py_object),
- ]
+ if PY2:
+ # figure out size of _Py_ssize_t for Python 2:
+ if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
+ _Py_ssize_t = ctypes.c_int64
+ else:
+ _Py_ssize_t = ctypes.c_int
+ else:
+ # platform ssize_t on Python 3
+ _Py_ssize_t = ctypes.c_ssize_t
- def tb_set_next(tb, tb_next):
- c_tb = _CTraceback.from_address(id(tb))
+ # regular python
+ class _PyObject(ctypes.Structure):
+ pass
+ _PyObject._fields_ = [
+ ('ob_refcnt', _Py_ssize_t),
+ ('ob_type', ctypes.POINTER(_PyObject))
+ ]
+
+ # python with trace
+ if hasattr(sys, 'getobjects'):
+ class _PyObject(ctypes.Structure):
+ pass
+ _PyObject._fields_ = [
+ ('_ob_next', ctypes.POINTER(_PyObject)),
+ ('_ob_prev', ctypes.POINTER(_PyObject)),
+ ('ob_refcnt', _Py_ssize_t),
+ ('ob_type', ctypes.POINTER(_PyObject))
+ ]
- # Clear out the old tb_next.
+ class _Traceback(_PyObject):
+ pass
+ _Traceback._fields_ = [
+ ('tb_next', ctypes.POINTER(_Traceback)),
+ ('tb_frame', ctypes.POINTER(_PyObject)),
+ ('tb_lasti', ctypes.c_int),
+ ('tb_lineno', ctypes.c_int)
+ ]
+
+ def tb_set_next(tb, next):
+ """Set the tb_next attribute of a traceback object."""
+ if not (isinstance(tb, TracebackType) and
+ (next is None or isinstance(next, TracebackType))):
+ raise TypeError('tb_set_next arguments must be traceback objects')
+ obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
- c_tb_next = ctypes.py_object(tb.tb_next)
- c_tb.tb_next = ctypes.py_object()
- ctypes.pythonapi.Py_DecRef(c_tb_next)
+ old = _Traceback.from_address(id(tb.tb_next))
+ old.ob_refcnt -= 1
+ if next is None:
+ obj.tb_next = ctypes.POINTER(_Traceback)()
+ else:
+ next = _Traceback.from_address(id(next))
+ next.ob_refcnt += 1
+ obj.tb_next = ctypes.pointer(next)
+
+ return tb_set_next
- # Assign the new tb_next.
- if tb_next is not None:
- c_tb_next = ctypes.py_object(tb_next)
- ctypes.pythonapi.Py_IncRef(c_tb_next)
- c_tb.tb_next = c_tb_next
- return tb
+# try to get a tb_set_next implementation if we don't have transparent
+# proxies.
+tb_set_next = None
+if tproxy is None:
+ try:
+ tb_set_next = _init_ugly_crap()
+ except:
+ pass
+ del _init_ugly_crap
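
The whole `fake_exc_info()` machinery above hinges on one trick: compile newline padding plus a raise statement so the re-raised traceback reports the template's filename and line number. A minimal standalone sketch of that trick (the filename and line number here are illustrative, not part of the module):

```python
import sys

lineno, filename = 7, "example.html"  # pretend template position
code = compile("\n" * (lineno - 1) + "raise __jinja_exception__[1]",
               filename, "exec")
try:
    exec(code, {"__jinja_exception__": (ValueError, ValueError("boom"))})
except ValueError:
    tb = sys.exc_info()[2].tb_next  # skip the exec() frame itself
    print(tb.tb_frame.f_code.co_filename, tb.tb_lineno)  # example.html 7
```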
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/defaults.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/defaults.py
old mode 100644
new mode 100755
index 8e0e7d77..7c93dec0
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/defaults.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/defaults.py
@@ -1,44 +1,56 @@
# -*- coding: utf-8 -*-
-from ._compat import range_type
-from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
-from .tests import TESTS as DEFAULT_TESTS # noqa: F401
-from .utils import Cycler
-from .utils import generate_lorem_ipsum
-from .utils import Joiner
-from .utils import Namespace
+"""
+ jinja2.defaults
+ ~~~~~~~~~~~~~~~
+
+ Jinja default filters and tags.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2._compat import range_type
+from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner, Namespace
+
# defaults for the parser / lexer
-BLOCK_START_STRING = "{%"
-BLOCK_END_STRING = "%}"
-VARIABLE_START_STRING = "{{"
-VARIABLE_END_STRING = "}}"
-COMMENT_START_STRING = "{#"
-COMMENT_END_STRING = "#}"
+BLOCK_START_STRING = '{%'
+BLOCK_END_STRING = '%}'
+VARIABLE_START_STRING = '{{'
+VARIABLE_END_STRING = '}}'
+COMMENT_START_STRING = '{#'
+COMMENT_END_STRING = '#}'
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
-NEWLINE_SEQUENCE = "\n"
+NEWLINE_SEQUENCE = '\n'
KEEP_TRAILING_NEWLINE = False
-# default filters, tests and namespace
+# default filters, tests and namespace
+from jinja2.filters import FILTERS as DEFAULT_FILTERS
+from jinja2.tests import TESTS as DEFAULT_TESTS
DEFAULT_NAMESPACE = {
- "range": range_type,
- "dict": dict,
- "lipsum": generate_lorem_ipsum,
- "cycler": Cycler,
- "joiner": Joiner,
- "namespace": Namespace,
+ 'range': range_type,
+ 'dict': dict,
+ 'lipsum': generate_lorem_ipsum,
+ 'cycler': Cycler,
+ 'joiner': Joiner,
+ 'namespace': Namespace
}
+
# default policies
DEFAULT_POLICIES = {
- "compiler.ascii_str": True,
- "urlize.rel": "noopener",
- "urlize.target": None,
- "truncate.leeway": 5,
- "json.dumps_function": None,
- "json.dumps_kwargs": {"sort_keys": True},
- "ext.i18n.trimmed": False,
+ 'compiler.ascii_str': True,
+ 'urlize.rel': 'noopener',
+ 'urlize.target': None,
+ 'truncate.leeway': 5,
+ 'json.dumps_function': None,
+ 'json.dumps_kwargs': {'sort_keys': True},
+ 'ext.i18n.trimmed': False,
}
+
+
+# export all constants
+__all__ = tuple(x for x in locals().keys() if x.isupper())
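
The parser/lexer defaults above map one-to-one onto `Environment` keyword arguments; a minimal sketch overriding the delimiter defaults, assuming a stock jinja2 install:

```python
from jinja2 import Environment

# Swap the default {% %} / {{ }} delimiters defined above.
env = Environment(block_start_string="<%", block_end_string="%>",
                  variable_start_string="${", variable_end_string="}")
print(env.from_string("<% if x %>${ x }<% endif %>").render(x=42))  # -> 42
```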
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/environment.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/environment.py
old mode 100644
new mode 100755
index 8430390e..549d9afa
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/environment.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/environment.py
@@ -1,83 +1,60 @@
# -*- coding: utf-8 -*-
-"""Classes for managing templates and their runtime and compile time
-options.
+"""
+ jinja2.environment
+ ~~~~~~~~~~~~~~~~~~
+
+ Provides a class that holds runtime and parsing time options.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
-from functools import partial
-from functools import reduce
-
-from markupsafe import Markup
-
-from . import nodes
-from ._compat import encode_filename
-from ._compat import implements_iterator
-from ._compat import implements_to_string
-from ._compat import iteritems
-from ._compat import PY2
-from ._compat import PYPY
-from ._compat import reraise
-from ._compat import string_types
-from ._compat import text_type
-from .compiler import CodeGenerator
-from .compiler import generate
-from .defaults import BLOCK_END_STRING
-from .defaults import BLOCK_START_STRING
-from .defaults import COMMENT_END_STRING
-from .defaults import COMMENT_START_STRING
-from .defaults import DEFAULT_FILTERS
-from .defaults import DEFAULT_NAMESPACE
-from .defaults import DEFAULT_POLICIES
-from .defaults import DEFAULT_TESTS
-from .defaults import KEEP_TRAILING_NEWLINE
-from .defaults import LINE_COMMENT_PREFIX
-from .defaults import LINE_STATEMENT_PREFIX
-from .defaults import LSTRIP_BLOCKS
-from .defaults import NEWLINE_SEQUENCE
-from .defaults import TRIM_BLOCKS
-from .defaults import VARIABLE_END_STRING
-from .defaults import VARIABLE_START_STRING
-from .exceptions import TemplateNotFound
-from .exceptions import TemplateRuntimeError
-from .exceptions import TemplatesNotFound
-from .exceptions import TemplateSyntaxError
-from .exceptions import UndefinedError
-from .lexer import get_lexer
-from .lexer import TokenStream
-from .nodes import EvalContext
-from .parser import Parser
-from .runtime import Context
-from .runtime import new_context
-from .runtime import Undefined
-from .utils import concat
-from .utils import consume
-from .utils import have_async_gen
-from .utils import import_string
-from .utils import internalcode
-from .utils import LRUCache
-from .utils import missing
+from functools import reduce, partial
+from jinja2 import nodes
+from jinja2.defaults import BLOCK_START_STRING, \
+ BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
+ COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
+ LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
+ DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
+ DEFAULT_POLICIES, KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
+from jinja2.lexer import get_lexer, TokenStream
+from jinja2.parser import Parser
+from jinja2.nodes import EvalContext
+from jinja2.compiler import generate, CodeGenerator
+from jinja2.runtime import Undefined, new_context, Context
+from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
+ TemplatesNotFound, TemplateRuntimeError
+from jinja2.utils import import_string, LRUCache, Markup, missing, \
+ concat, consume, internalcode, have_async_gen
+from jinja2._compat import imap, ifilter, string_types, iteritems, \
+ text_type, reraise, implements_iterator, implements_to_string, \
+ encode_filename, PY2, PYPY
+
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
+# the function to create jinja traceback objects. This is dynamically
+# imported on the first exception in the exception handler.
+_make_traceback = None
-def get_spontaneous_environment(cls, *args):
- """Return a new spontaneous environment. A spontaneous environment
- is used for templates created directly rather than through an
- existing environment.
- :param cls: Environment class to create.
- :param args: Positional arguments passed to environment.
+def get_spontaneous_environment(*args):
+ """Return a new spontaneous environment. A spontaneous environment is an
+    unnamed and inaccessible (in theory) environment that is used for
+ templates generated from a string and not from the file system.
"""
- key = (cls, args)
-
try:
- return _spontaneous_environments[key]
- except KeyError:
- _spontaneous_environments[key] = env = cls(*args)
- env.shared = True
+ env = _spontaneous_environments.get(args)
+ except TypeError:
+ return Environment(*args)
+ if env is not None:
return env
+ _spontaneous_environments[args] = env = Environment(*args)
+ env.shared = True
+ return env
def create_cache(size):
@@ -116,25 +93,20 @@ def fail_for_missing_callable(string, name):
try:
name._fail_with_undefined_error()
except Exception as e:
- msg = "%s (%s; did you forget to quote the callable name?)" % (msg, e)
+ msg = '%s (%s; did you forget to quote the callable name?)' % (msg, e)
raise TemplateRuntimeError(msg)
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
- assert issubclass(
- environment.undefined, Undefined
- ), "undefined must be a subclass of undefined because filters depend on it."
- assert (
- environment.block_start_string
- != environment.variable_start_string
- != environment.comment_start_string
- ), "block, variable and comment start strings must be different"
- assert environment.newline_sequence in (
- "\r",
- "\r\n",
- "\n",
- ), "newline_sequence set to unknown line ending string."
+ assert issubclass(environment.undefined, Undefined), 'undefined must ' \
+ 'be a subclass of undefined because filters depend on it.'
+ assert environment.block_start_string != \
+ environment.variable_start_string != \
+ environment.comment_start_string, 'block, variable and comment ' \
+ 'start strings must be different'
+ assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
+ 'newline_sequence set to unknown line ending string.'
return environment
@@ -219,7 +191,7 @@ class Environment(object):
`autoescape`
If set to ``True`` the XML/HTML autoescaping feature is enabled by
default. For more details about autoescaping see
- :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also
+ :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return ``True`` or ``False`` depending on autoescape should be
enabled by default.
@@ -277,6 +249,10 @@ class Environment(object):
#: must not be modified
shared = False
+ #: these are currently EXPERIMENTAL undocumented features.
+ exception_handler = None
+ exception_formatter = None
+
#: the class that is used for code generation. See
#: :class:`~jinja2.compiler.CodeGenerator` for more information.
code_generator_class = CodeGenerator
@@ -285,31 +261,29 @@ class Environment(object):
#: :class:`~jinja2.runtime.Context` for more information.
context_class = Context
- def __init__(
- self,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- loader=None,
- cache_size=400,
- auto_reload=True,
- bytecode_cache=None,
- enable_async=False,
- ):
+ def __init__(self,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False,
+ loader=None,
+ cache_size=400,
+ auto_reload=True,
+ bytecode_cache=None,
+ enable_async=False):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
@@ -360,9 +334,6 @@ def __init__(
self.enable_async = enable_async
self.is_async = self.enable_async and have_async_gen
- if self.is_async:
- # runs patch_all() to enable async support
- from . import asyncsupport # noqa: F401
_environment_sanity_check(self)
@@ -382,28 +353,15 @@ def extend(self, **attributes):
if not hasattr(self, key):
setattr(self, key, value)
- def overlay(
- self,
- block_start_string=missing,
- block_end_string=missing,
- variable_start_string=missing,
- variable_end_string=missing,
- comment_start_string=missing,
- comment_end_string=missing,
- line_statement_prefix=missing,
- line_comment_prefix=missing,
- trim_blocks=missing,
- lstrip_blocks=missing,
- extensions=missing,
- optimized=missing,
- undefined=missing,
- finalize=missing,
- autoescape=missing,
- loader=missing,
- cache_size=missing,
- auto_reload=missing,
- bytecode_cache=missing,
- ):
+ def overlay(self, block_start_string=missing, block_end_string=missing,
+ variable_start_string=missing, variable_end_string=missing,
+ comment_start_string=missing, comment_end_string=missing,
+ line_statement_prefix=missing, line_comment_prefix=missing,
+ trim_blocks=missing, lstrip_blocks=missing,
+ extensions=missing, optimized=missing,
+ undefined=missing, finalize=missing, autoescape=missing,
+ loader=missing, cache_size=missing, auto_reload=missing,
+ bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except for cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
@@ -416,7 +374,7 @@ def overlay(
through.
"""
args = dict(locals())
- del args["self"], args["cache_size"], args["extensions"]
+ del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
@@ -444,7 +402,8 @@ def overlay(
def iter_extensions(self):
"""Iterates over the extensions by priority."""
- return iter(sorted(self.extensions.values(), key=lambda x: x.priority))
+ return iter(sorted(self.extensions.values(),
+ key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
@@ -476,9 +435,8 @@ def getattr(self, obj, attribute):
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
- def call_filter(
- self, name, value, args=None, kwargs=None, context=None, eval_ctx=None
- ):
+ def call_filter(self, name, value, args=None, kwargs=None,
+ context=None, eval_ctx=None):
"""Invokes a filter on a value the same way the compiler does it.
Note that on Python 3 this might return a coroutine in case the
@@ -490,22 +448,21 @@ def call_filter(
"""
func = self.filters.get(name)
if func is None:
- fail_for_missing_callable("no filter named %r", name)
+ fail_for_missing_callable('no filter named %r', name)
args = [value] + list(args or ())
- if getattr(func, "contextfilter", False) is True:
+ if getattr(func, 'contextfilter', False):
if context is None:
- raise TemplateRuntimeError(
- "Attempted to invoke context filter without context"
- )
+ raise TemplateRuntimeError('Attempted to invoke context '
+ 'filter without context')
args.insert(0, context)
- elif getattr(func, "evalcontextfilter", False) is True:
+ elif getattr(func, 'evalcontextfilter', False):
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
- elif getattr(func, "environmentfilter", False) is True:
+ elif getattr(func, 'environmentfilter', False):
args.insert(0, self)
return func(*args, **(kwargs or {}))
@@ -516,7 +473,7 @@ def call_test(self, name, value, args=None, kwargs=None):
"""
func = self.tests.get(name)
if func is None:
- fail_for_missing_callable("no test named %r", name)
+ fail_for_missing_callable('no test named %r', name)
return func(value, *(args or ()), **(kwargs or {}))
@internalcode
@@ -526,13 +483,14 @@ def parse(self, source, name=None, filename=None):
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
- If you are :ref:`developing Jinja extensions `
+ If you are :ref:`developing Jinja2 extensions `
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
- self.handle_exception(source=source)
+ exc_info = sys.exc_info()
+ self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
@@ -552,18 +510,16 @@ def lex(self, source, name=None, filename=None):
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
- self.handle_exception(source=source)
+ exc_info = sys.exc_info()
+ self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
- return reduce(
- lambda s, e: e.preprocess(s, name, filename),
- self.iter_extensions(),
- text_type(source),
- )
+ return reduce(lambda s, e: e.preprocess(s, name, filename),
+ self.iter_extensions(), text_type(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
@@ -583,14 +539,8 @@ def _generate(self, source, name, filename, defer_init=False):
.. versionadded:: 2.5
"""
- return generate(
- source,
- self,
- name,
- filename,
- defer_init=defer_init,
- optimized=self.optimized,
- )
+ return generate(source, self, name, filename, defer_init=defer_init,
+ optimized=self.optimized)
def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
@@ -598,10 +548,11 @@ def _compile(self, source, filename):
.. versionadded:: 2.5
"""
- return compile(source, filename, "exec")
+ return compile(source, filename, 'exec')
@internalcode
- def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
+ def compile(self, source, name=None, filename=None, raw=False,
+ defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
@@ -626,16 +577,18 @@ def compile(self, source, name=None, filename=None, raw=False, defer_init=False)
if isinstance(source, string_types):
source_hint = source
source = self._parse(source, name, filename)
- source = self._generate(source, name, filename, defer_init=defer_init)
+ source = self._generate(source, name, filename,
+ defer_init=defer_init)
if raw:
return source
if filename is None:
- filename = ""
+ filename = ''
else:
filename = encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
- self.handle_exception(source=source_hint)
+ exc_info = sys.exc_info()
+ self.handle_exception(exc_info, source_hint=source_hint)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
@@ -665,32 +618,26 @@ def compile_expression(self, source, undefined_to_none=True):
.. versionadded:: 2.1
"""
- parser = Parser(self, source, state="variable")
+ parser = Parser(self, source, state='variable')
+ exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
- raise TemplateSyntaxError(
- "chunk after expression", parser.stream.current.lineno, None, None
- )
+ raise TemplateSyntaxError('chunk after expression',
+ parser.stream.current.lineno,
+ None, None)
expr.set_environment(self)
except TemplateSyntaxError:
- if sys.exc_info() is not None:
- self.handle_exception(source=source)
-
- body = [nodes.Assign(nodes.Name("result", "store"), expr, lineno=1)]
+ exc_info = sys.exc_info()
+ if exc_info is not None:
+ self.handle_exception(exc_info, source_hint=source)
+ body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
- def compile_templates(
- self,
- target,
- extensions=None,
- filter_func=None,
- zip="deflated",
- log_function=None,
- ignore_errors=True,
- py_compile=False,
- ):
+ def compile_templates(self, target, extensions=None, filter_func=None,
+ zip='deflated', log_function=None,
+ ignore_errors=True, py_compile=False):
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
@@ -713,52 +660,42 @@ def compile_templates(
.. versionadded:: 2.4
"""
- from .loaders import ModuleLoader
+ from jinja2.loaders import ModuleLoader
if log_function is None:
-
- def log_function(x):
- pass
+ log_function = lambda x: None
if py_compile:
if not PY2 or PYPY:
- import warnings
-
- warnings.warn(
- "'py_compile=True' has no effect on PyPy or Python"
- " 3 and will be removed in version 3.0",
- DeprecationWarning,
- stacklevel=2,
- )
+ from warnings import warn
+ warn(Warning('py_compile has no effect on pypy or Python 3'))
py_compile = False
else:
import imp
import marshal
-
- py_header = imp.get_magic() + u"\xff\xff\xff\xff".encode("iso-8859-15")
+ py_header = imp.get_magic() + \
+ u'\xff\xff\xff\xff'.encode('iso-8859-15')
# Python 3.3 added a source filesize to the header
if sys.version_info >= (3, 3):
- py_header += u"\x00\x00\x00\x00".encode("iso-8859-15")
+ py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
- def write_file(filename, data):
+ def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
- if isinstance(data, text_type):
- data = data.encode("utf8")
-
- with open(os.path.join(target, filename), "wb") as f:
+ f = open(os.path.join(target, filename), mode)
+ try:
f.write(data)
+ finally:
+ f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
-
- zip_file = ZipFile(
- target, "w", dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]
- )
+ zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
+ stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
@@ -780,16 +717,18 @@ def write_file(filename, data):
if py_compile:
c = self._compile(code, encode_filename(filename))
- write_file(filename + "c", py_header + marshal.dumps(c))
- log_function('Byte-compiled "%s" as %s' % (name, filename + "c"))
+ write_file(filename + 'c', py_header +
+ marshal.dumps(c), 'wb')
+ log_function('Byte-compiled "%s" as %s' %
+ (name, filename + 'c'))
else:
- write_file(filename, code)
+ write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
- log_function("Finished compiling templates")
+ log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
@@ -807,29 +746,38 @@ def list_templates(self, extensions=None, filter_func=None):
.. versionadded:: 2.4
"""
- names = self.loader.list_templates()
-
+ x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
- raise TypeError(
- "either extensions or filter_func can be passed, but not both"
- )
-
- def filter_func(x):
- return "." in x and x.rsplit(".", 1)[1] in extensions
-
+ raise TypeError('either extensions or filter_func '
+ 'can be passed, but not both')
+ filter_func = lambda x: '.' in x and \
+ x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
- names = [name for name in names if filter_func(name)]
-
- return names
+ x = list(ifilter(filter_func, x))
+ return x
- def handle_exception(self, source=None):
+ def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
- from .debug import rewrite_traceback_stack
-
- reraise(*rewrite_traceback_stack(source=source))
+ global _make_traceback
+ if exc_info is None:
+ exc_info = sys.exc_info()
+
+ # the debugging module is imported when it's used for the first time.
+ # we're doing a lot of stuff there and for applications that do not
+ # get any exceptions in template rendering there is no need to load
+ # all of that.
+ if _make_traceback is None:
+ from jinja2.debug import make_traceback as _make_traceback
+ traceback = _make_traceback(exc_info, source_hint)
+ if rendered and self.exception_formatter is not None:
+ return self.exception_formatter(traceback)
+ if self.exception_handler is not None:
+ self.exception_handler(traceback)
+ exc_type, exc_value, tb = traceback.standard_exc_info
+ reraise(exc_type, exc_value, tb)
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
@@ -846,13 +794,12 @@ def join_path(self, template, parent):
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
- raise TypeError("no loader for this environment specified")
+ raise TypeError('no loader for this environment specified')
cache_key = (weakref.ref(self.loader), name)
if self.cache is not None:
template = self.cache.get(cache_key)
- if template is not None and (
- not self.auto_reload or template.is_up_to_date
- ):
+ if template is not None and (not self.auto_reload or
+ template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
@@ -888,24 +835,15 @@ def select_template(self, names, parent=None, globals=None):
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
- .. versionchanged:: 2.11
- If names is :class:`Undefined`, an :exc:`UndefinedError` is
- raised instead. If no templates were found and names
- contains :class:`Undefined`, the message is more helpful.
+ .. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
-
- .. versionadded:: 2.3
"""
- if isinstance(names, Undefined):
- names._fail_with_undefined_error()
-
if not names:
- raise TemplatesNotFound(
- message=u"Tried to select from an empty list " u"of templates."
- )
+ raise TemplatesNotFound(message=u'Tried to select from an empty list '
+ u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
@@ -914,19 +852,20 @@ def select_template(self, names, parent=None, globals=None):
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
- except (TemplateNotFound, UndefinedError):
+ except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
- def get_or_select_template(self, template_name_or_list, parent=None, globals=None):
+ def get_or_select_template(self, template_name_or_list,
+ parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
- if isinstance(template_name_or_list, (string_types, Undefined)):
+ if isinstance(template_name_or_list, string_types):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
@@ -977,57 +916,32 @@ class Template(object):
StopIteration
"""
- #: Type of environment to create when creating a template directly
- #: rather than through an existing environment.
- environment_class = Environment
-
- def __new__(
- cls,
- source,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- enable_async=False,
- ):
+ def __new__(cls, source,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False,
+ enable_async=False):
env = get_spontaneous_environment(
- cls.environment_class,
- block_start_string,
- block_end_string,
- variable_start_string,
- variable_end_string,
- comment_start_string,
- comment_end_string,
- line_statement_prefix,
- line_comment_prefix,
- trim_blocks,
- lstrip_blocks,
- newline_sequence,
- keep_trailing_newline,
- frozenset(extensions),
- optimized,
- undefined,
- finalize,
- autoescape,
- None,
- 0,
- False,
- None,
- enable_async,
- )
+ block_start_string, block_end_string, variable_start_string,
+ variable_end_string, comment_start_string, comment_end_string,
+ line_statement_prefix, line_comment_prefix, trim_blocks,
+ lstrip_blocks, newline_sequence, keep_trailing_newline,
+ frozenset(extensions), optimized, undefined, finalize, autoescape,
+ None, 0, False, None, enable_async)
return env.from_string(source, template_class=cls)
@classmethod
@@ -1035,7 +949,10 @@ def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
- namespace = {"environment": environment, "__file__": code.co_filename}
+ namespace = {
+ 'environment': environment,
+ '__file__': code.co_filename
+ }
exec(code, namespace)
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
@@ -1055,21 +972,21 @@ def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
- t.name = namespace["name"]
- t.filename = namespace["__file__"]
- t.blocks = namespace["blocks"]
+ t.name = namespace['name']
+ t.filename = namespace['__file__']
+ t.blocks = namespace['blocks']
# render function and module
- t.root_render_func = namespace["root"]
+ t.root_render_func = namespace['root']
t._module = None
# debug and loader helpers
- t._debug_info = namespace["debug_info"]
+ t._debug_info = namespace['debug_info']
t._uptodate = None
# store the reference
- namespace["environment"] = environment
- namespace["__jinja_template__"] = t
+ namespace['environment'] = environment
+ namespace['__jinja_template__'] = t
return t
@@ -1087,7 +1004,8 @@ def render(self, *args, **kwargs):
try:
return concat(self.root_render_func(self.new_context(vars)))
except Exception:
- self.environment.handle_exception()
+ exc_info = sys.exc_info()
+ return self.environment.handle_exception(exc_info, True)
def render_async(self, *args, **kwargs):
"""This works similar to :meth:`render` but returns a coroutine
@@ -1099,9 +1017,8 @@ def render_async(self, *args, **kwargs):
await template.render_async(knights='that say nih; asynchronously')
"""
# see asyncsupport for the actual implementation
- raise NotImplementedError(
- "This feature is not available for this version of Python"
- )
+ raise NotImplementedError('This feature is not available for this '
+ 'version of Python')
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
@@ -1122,28 +1039,29 @@ def generate(self, *args, **kwargs):
for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
- yield self.environment.handle_exception()
+ exc_info = sys.exc_info()
+ else:
+ return
+ yield self.environment.handle_exception(exc_info, True)
def generate_async(self, *args, **kwargs):
"""An async version of :meth:`generate`. Works very similarly but
returns an async iterator instead.
"""
# see asyncsupport for the actual implementation
- raise NotImplementedError(
- "This feature is not available for this version of Python"
- )
+ raise NotImplementedError('This feature is not available for this '
+ 'version of Python')
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
        provided will be passed to the template. By default the globals
are added to the context. If shared is set to `True` the data
- is passed as is to the context without adding the globals.
+        is passed as is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
- return new_context(
- self.environment, self.name, self.blocks, vars, shared, self.globals, locals
- )
+ return new_context(self.environment, self.name, self.blocks,
+ vars, shared, self.globals, locals)
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
@@ -1156,14 +1074,13 @@ def make_module(self, vars=None, shared=False, locals=None):
def make_module_async(self, vars=None, shared=False, locals=None):
"""As template module creation can invoke template code for
- asynchronous executions this method must be used instead of the
+        asynchronous executions this method must be used instead of the
normal :meth:`make_module` one. Likewise the module attribute
becomes unavailable in async mode.
"""
# see asyncsupport for the actual implementation
- raise NotImplementedError(
- "This feature is not available for this version of Python"
- )
+ raise NotImplementedError('This feature is not available for this '
+ 'version of Python')
@internalcode
def _get_default_module(self):
@@ -1207,16 +1124,15 @@ def is_up_to_date(self):
@property
def debug_info(self):
"""The debug info mapping."""
- if self._debug_info:
- return [tuple(map(int, x.split("="))) for x in self._debug_info.split("&")]
- return []
+ return [tuple(imap(int, x.split('='))) for x in
+ self._debug_info.split('&')]
def __repr__(self):
if self.name is None:
- name = "memory:%x" % id(self)
+ name = 'memory:%x' % id(self)
else:
name = repr(self.name)
- return "<%s %s>" % (self.__class__.__name__, name)
+ return '<%s %s>' % (self.__class__.__name__, name)
@implements_to_string
@@ -1229,12 +1145,10 @@ class TemplateModule(object):
def __init__(self, template, context, body_stream=None):
if body_stream is None:
if context.environment.is_async:
- raise RuntimeError(
- "Async mode requires a body stream "
- "to be passed to a template module. Use "
- "the async methods of the API you are "
- "using."
- )
+ raise RuntimeError('Async mode requires a body stream '
+ 'to be passed to a template module. Use '
+ 'the async methods of the API you are '
+ 'using.')
body_stream = list(template.root_render_func(context))
self._body_stream = body_stream
self.__dict__.update(context.get_exported())
@@ -1248,10 +1162,10 @@ def __str__(self):
def __repr__(self):
if self.__name__ is None:
- name = "memory:%x" % id(self)
+ name = 'memory:%x' % id(self)
else:
name = repr(self.__name__)
- return "<%s %s>" % (self.__class__.__name__, name)
+ return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
@@ -1267,7 +1181,7 @@ def __init__(self, template, undefined_to_none):
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
- rv = context.vars["result"]
+ rv = context.vars['result']
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
@@ -1289,7 +1203,7 @@ def __init__(self, gen):
self._gen = gen
self.disable_buffering()
- def dump(self, fp, encoding=None, errors="strict"):
+ def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
        By default unicode strings are written; if you want to encode
        before writing, specify an `encoding`.
@@ -1301,15 +1215,15 @@ def dump(self, fp, encoding=None, errors="strict"):
close = False
if isinstance(fp, string_types):
if encoding is None:
- encoding = "utf-8"
- fp = open(fp, "wb")
+ encoding = 'utf-8'
+ fp = open(fp, 'wb')
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
- if hasattr(fp, "writelines"):
+ if hasattr(fp, 'writelines'):
fp.writelines(iterable)
else:
for item in iterable:
@@ -1345,7 +1259,7 @@ def _buffered_generator(self, size):
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
- raise ValueError("buffer size too small")
+ raise ValueError('buffer size too small')
self.buffered = True
self._next = partial(next, self._buffered_generator(size))
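
Several hunks above touch `TemplateStream` (`dump()`, `enable_buffering()`); a minimal sketch of that API from the caller's side, assuming a stock jinja2 install:

```python
from jinja2 import Environment

tmpl = Environment().from_string("{% for i in range(3) %}{{ i }} {% endfor %}")
stream = tmpl.stream()
stream.enable_buffering(size=2)  # yield rendered events in chunks of two
print("".join(stream))           # -> "0 1 2 "
```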
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/exceptions.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/exceptions.py
old mode 100644
new mode 100755
index 0bf2003e..c018a33e
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/exceptions.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/exceptions.py
@@ -1,18 +1,23 @@
# -*- coding: utf-8 -*-
-from ._compat import imap
-from ._compat import implements_to_string
-from ._compat import PY2
-from ._compat import text_type
+"""
+ jinja2.exceptions
+ ~~~~~~~~~~~~~~~~~
+
+ Jinja exceptions.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2._compat import imap, text_type, PY2, implements_to_string
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
-
def __init__(self, message=None):
if message is not None:
- message = text_type(message).encode("utf-8")
+ message = text_type(message).encode('utf-8')
Exception.__init__(self, message)
@property
@@ -20,13 +25,11 @@ def message(self):
if self.args:
message = self.args[0]
if message is not None:
- return message.decode("utf-8", "replace")
+ return message.decode('utf-8', 'replace')
def __unicode__(self):
- return self.message or u""
-
+ return self.message or u''
else:
-
def __init__(self, message=None):
Exception.__init__(self, message)
@@ -40,28 +43,16 @@ def message(self):
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
- """Raised if a template does not exist.
-
- .. versionchanged:: 2.11
- If the given name is :class:`Undefined` and no message was
- provided, an :exc:`UndefinedError` is raised.
- """
+ """Raised if a template does not exist."""
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
- IOError.__init__(self, name)
-
+ IOError.__init__(self)
if message is None:
- from .runtime import Undefined
-
- if isinstance(name, Undefined):
- name._fail_with_undefined_error()
-
message = name
-
self.message = message
self.name = name
self.templates = [name]
@@ -75,28 +66,13 @@ class TemplatesNotFound(TemplateNotFound):
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
- .. versionchanged:: 2.11
- If a name in the list of names is :class:`Undefined`, a message
- about it being undefined is shown rather than the empty string.
-
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
- from .runtime import Undefined
-
- parts = []
-
- for name in names:
- if isinstance(name, Undefined):
- parts.append(name._undefined_message)
- else:
- parts.append(name)
-
- message = u"none of the templates given were found: " + u", ".join(
- imap(text_type, parts)
- )
+ message = u'none of the templates given were found: ' + \
+ u', '.join(imap(text_type, names))
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
@@ -122,11 +98,11 @@ def __str__(self):
return self.message
# otherwise attach some stuff
- location = "line %d" % self.lineno
+ location = 'line %d' % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
- lines = [self.message, " " + location]
+ lines = [self.message, ' ' + location]
# if the source is set, add the line to the output
if self.source is not None:
@@ -135,16 +111,9 @@ def __str__(self):
except IndexError:
line = None
if line:
- lines.append(" " + line.strip())
-
- return u"\n".join(lines)
+ lines.append(' ' + line.strip())
- def __reduce__(self):
- # https://bugs.python.org/issue1692335 Exceptions that take
- # multiple required arguments have problems with pickling.
- # Without this, raises TypeError: __init__() missing 1 required
- # positional argument: 'lineno'
- return self.__class__, (self.message, self.lineno, self.name, self.filename)
+ return u'\n'.join(lines)
class TemplateAssertionError(TemplateSyntaxError):
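
Since TemplateNotFound inherits from both IOError and LookupError (as the class line above shows), calling code can catch it under either base; a minimal sketch, with an illustrative loader path and template name:

    from jinja2 import Environment, FileSystemLoader
    from jinja2.exceptions import TemplateNotFound

    env = Environment(loader=FileSystemLoader("templates"))
    try:
        env.get_template("missing.html")
    except TemplateNotFound as exc:
        print(exc.name, exc.templates)  # missing.html ['missing.html']
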
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/ext.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/ext.py
old mode 100644
new mode 100755
index 9141be4d..0734a84f
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/ext.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/ext.py
@@ -1,49 +1,42 @@
# -*- coding: utf-8 -*-
-"""Extension API for adding custom tags and behavior."""
-import pprint
+"""
+ jinja2.ext
+ ~~~~~~~~~~
+
+ Jinja extensions allow adding custom tags, similar to the way Django
+ custom tags work. By default two example extensions exist: an i18n and a cache
+ extension.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
+"""
import re
-from sys import version_info
-
-from markupsafe import Markup
-
-from . import nodes
-from ._compat import iteritems
-from ._compat import string_types
-from ._compat import with_metaclass
-from .defaults import BLOCK_END_STRING
-from .defaults import BLOCK_START_STRING
-from .defaults import COMMENT_END_STRING
-from .defaults import COMMENT_START_STRING
-from .defaults import KEEP_TRAILING_NEWLINE
-from .defaults import LINE_COMMENT_PREFIX
-from .defaults import LINE_STATEMENT_PREFIX
-from .defaults import LSTRIP_BLOCKS
-from .defaults import NEWLINE_SEQUENCE
-from .defaults import TRIM_BLOCKS
-from .defaults import VARIABLE_END_STRING
-from .defaults import VARIABLE_START_STRING
-from .environment import Environment
-from .exceptions import TemplateAssertionError
-from .exceptions import TemplateSyntaxError
-from .nodes import ContextReference
-from .runtime import concat
-from .utils import contextfunction
-from .utils import import_string
+
+from jinja2 import nodes
+from jinja2.defaults import BLOCK_START_STRING, \
+ BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
+ COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
+ LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
+ KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
+from jinja2.environment import Environment
+from jinja2.runtime import concat
+from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
+from jinja2.utils import contextfunction, import_string, Markup
+from jinja2._compat import with_metaclass, string_types, iteritems
+
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
-GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext")
-
-_ws_re = re.compile(r"\s*\n\s*")
+GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
- def __new__(mcs, name, bases, d):
- rv = type.__new__(mcs, name, bases, d)
- rv.identifier = rv.__module__ + "." + rv.__name__
+ def __new__(cls, name, bases, d):
+ rv = type.__new__(cls, name, bases, d)
+ rv.identifier = rv.__module__ + '.' + rv.__name__
return rv
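
The metaclass above stamps every extension class with a dotted identifier built from its module and class name; a minimal sketch (MyExtension is a hypothetical class, not from the patch):

    from jinja2.ext import Extension

    class MyExtension(Extension):
        pass

    print(MyExtension.identifier)  # e.g. '__main__.MyExtension'
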
@@ -98,6 +91,10 @@ def filter_stream(self, stream):
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
+
+ In the `ext` folder of the Jinja2 source distribution there is a file
+ called `inlinegettext.py` which implements a filter that utilizes this
+ method.
"""
return stream
@@ -119,9 +116,8 @@ def attr(self, name, lineno=None):
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
- def call_method(
- self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None
- ):
+ def call_method(self, name, args=None, kwargs=None, dyn_args=None,
+ dyn_kwargs=None, lineno=None):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
@@ -129,19 +125,13 @@ def call_method(
args = []
if kwargs is None:
kwargs = []
- return nodes.Call(
- self.attr(name, lineno=lineno),
- args,
- kwargs,
- dyn_args,
- dyn_kwargs,
- lineno=lineno,
- )
+ return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
+ dyn_args, dyn_kwargs, lineno=lineno)
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
- return __context.call(__context.resolve("gettext"), *args, **kwargs)
+ return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
@@ -150,31 +140,24 @@ def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
- # Always treat as a format string, even if there are no
- # variables. This makes translation strings more consistent
- # and predictable. This requires escaping
return rv % variables
-
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
- variables.setdefault("num", __num)
+ variables.setdefault('num', __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
- # Always treat as a format string, see gettext comment above.
return rv % variables
-
return ngettext
class InternationalizationExtension(Extension):
- """This extension adds gettext support to Jinja."""
-
- tags = {"trans"}
+ """This extension adds gettext support to Jinja2."""
+ tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
@@ -185,28 +168,30 @@ class InternationalizationExtension(Extension):
def __init__(self, environment):
Extension.__init__(self, environment)
- environment.globals["_"] = _gettext_alias
+ environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
- newstyle_gettext=False,
+ newstyle_gettext=False
)
def _install(self, translations, newstyle=None):
- gettext = getattr(translations, "ugettext", None)
+ gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
- ngettext = getattr(translations, "ungettext", None)
+ ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
- lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle
+ lambda x: x,
+ lambda s, p, n: (n != 1 and (p,) or (s,))[0],
+ newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
@@ -215,10 +200,13 @@ def _install_callables(self, gettext, ngettext, newstyle=None):
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
- self.environment.globals.update(gettext=gettext, ngettext=ngettext)
+ self.environment.globals.update(
+ gettext=gettext,
+ ngettext=ngettext
+ )
def _uninstall(self, translations):
- for key in "gettext", "ngettext":
+ for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
@@ -238,44 +226,41 @@ def parse(self, parser):
plural_expr_assignment = None
variables = {}
trimmed = None
- while parser.stream.current.type != "block_end":
+ while parser.stream.current.type != 'block_end':
if variables:
- parser.stream.expect("comma")
+ parser.stream.expect('comma')
# skip colon for python compatibility
- if parser.stream.skip_if("colon"):
+ if parser.stream.skip_if('colon'):
break
- name = parser.stream.expect("name")
+ name = parser.stream.expect('name')
if name.value in variables:
- parser.fail(
- "translatable variable %r defined twice." % name.value,
- name.lineno,
- exc=TemplateAssertionError,
- )
+ parser.fail('translatable variable %r defined twice.' %
+ name.value, name.lineno,
+ exc=TemplateAssertionError)
# expressions
- if parser.stream.current.type == "assign":
+ if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
- elif trimmed is None and name.value in ("trimmed", "notrimmed"):
- trimmed = name.value == "trimmed"
+ elif trimmed is None and name.value in ('trimmed', 'notrimmed'):
+ trimmed = name.value == 'trimmed'
continue
else:
- variables[name.value] = var = nodes.Name(name.value, "load")
+ variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
if isinstance(var, nodes.Call):
- plural_expr = nodes.Name("_trans", "load")
+ plural_expr = nodes.Name('_trans', 'load')
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
- nodes.Name("_trans", "store"), var
- )
+ nodes.Name('_trans', 'store'), var)
else:
plural_expr = var
- num_called_num = name.value == "num"
+ num_called_num = name.value == 'num'
- parser.stream.expect("block_end")
+ parser.stream.expect('block_end')
plural = None
have_plural = False
@@ -286,24 +271,22 @@ def parse(self, parser):
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
- plural_expr = nodes.Name(singular_names[0], "load")
- num_called_num = singular_names[0] == "num"
+ plural_expr = nodes.Name(singular_names[0], 'load')
+ num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
- if parser.stream.current.test("name:pluralize"):
+ if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
- if parser.stream.current.type != "block_end":
- name = parser.stream.expect("name")
+ if parser.stream.current.type != 'block_end':
+ name = parser.stream.expect('name')
if name.value not in variables:
- parser.fail(
- "unknown variable %r for pluralization" % name.value,
- name.lineno,
- exc=TemplateAssertionError,
- )
+ parser.fail('unknown variable %r for pluralization' %
+ name.value, name.lineno,
+ exc=TemplateAssertionError)
plural_expr = variables[name.value]
- num_called_num = name.value == "num"
- parser.stream.expect("block_end")
+ num_called_num = name.value == 'num'
+ parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
@@ -313,97 +296,88 @@ def parse(self, parser):
# register free names as simple name expressions
for var in referenced:
if var not in variables:
- variables[var] = nodes.Name(var, "load")
+ variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
- parser.fail("pluralize without variables", lineno)
+ parser.fail('pluralize without variables', lineno)
if trimmed is None:
- trimmed = self.environment.policies["ext.i18n.trimmed"]
+ trimmed = self.environment.policies['ext.i18n.trimmed']
if trimmed:
singular = self._trim_whitespace(singular)
if plural:
plural = self._trim_whitespace(plural)
- node = self._make_node(
- singular,
- plural,
- variables,
- plural_expr,
- bool(referenced),
- num_called_num and have_plural,
- )
+ node = self._make_node(singular, plural, variables, plural_expr,
+ bool(referenced),
+ num_called_num and have_plural)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
- def _trim_whitespace(self, string, _ws_re=_ws_re):
- return _ws_re.sub(" ", string.strip())
+ def _trim_whitespace(self, string, _ws_re=re.compile(r'\s*\n\s*')):
+ return _ws_re.sub(' ', string.strip())
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
- if parser.stream.current.type == "data":
- buf.append(parser.stream.current.value.replace("%", "%%"))
+ if parser.stream.current.type == 'data':
+ buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
- elif parser.stream.current.type == "variable_begin":
+ elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
- name = parser.stream.expect("name").value
+ name = parser.stream.expect('name').value
referenced.append(name)
- buf.append("%%(%s)s" % name)
- parser.stream.expect("variable_end")
- elif parser.stream.current.type == "block_begin":
+ buf.append('%%(%s)s' % name)
+ parser.stream.expect('variable_end')
+ elif parser.stream.current.type == 'block_begin':
next(parser.stream)
- if parser.stream.current.test("name:endtrans"):
+ if parser.stream.current.test('name:endtrans'):
break
- elif parser.stream.current.test("name:pluralize"):
+ elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
- parser.fail(
- "a translatable section can have only one pluralize section"
- )
- parser.fail(
- "control structures in translatable sections are not allowed"
- )
+ parser.fail('a translatable section can have only one '
+ 'pluralize section')
+ parser.fail('control structures in translatable sections are '
+ 'not allowed')
elif parser.stream.eos:
- parser.fail("unclosed translation block")
+ parser.fail('unclosed translation block')
else:
- raise RuntimeError("internal parser error")
+ assert False, 'internal parser error'
return referenced, concat(buf)
- def _make_node(
- self, singular, plural, variables, plural_expr, vars_referenced, num_called_num
- ):
+ def _make_node(self, singular, plural, variables, plural_expr,
+ vars_referenced, num_called_num):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
- singular = singular.replace("%%", "%")
+ singular = singular.replace('%%', '%')
if plural:
- plural = plural.replace("%%", "%")
+ plural = plural.replace('%%', '%')
# singular only:
if plural_expr is None:
- gettext = nodes.Name("gettext", "load")
- node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)
+ gettext = nodes.Name('gettext', 'load')
+ node = nodes.Call(gettext, [nodes.Const(singular)],
+ [], None, None)
# singular and plural
else:
- ngettext = nodes.Name("ngettext", "load")
- node = nodes.Call(
- ngettext,
- [nodes.Const(singular), nodes.Const(plural), plural_expr],
- [],
- None,
- None,
- )
+ ngettext = nodes.Name('ngettext', 'load')
+ node = nodes.Call(ngettext, [
+ nodes.Const(singular),
+ nodes.Const(plural),
+ plural_expr
+ ], [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
@@ -412,7 +386,7 @@ def _make_node(
for key, value in iteritems(variables):
# the function adds that later anyways in case num was
# called num, so just skip it.
- if num_called_num and key == "num":
+ if num_called_num and key == 'num':
continue
node.kwargs.append(nodes.Keyword(key, value))
@@ -422,24 +396,18 @@ def _make_node(
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
- node = nodes.Mod(
- node,
- nodes.Dict(
- [
- nodes.Pair(nodes.Const(key), value)
- for key, value in variables.items()
- ]
- ),
- )
+ node = nodes.Mod(node, nodes.Dict([
+ nodes.Pair(nodes.Const(key), value)
+ for key, value in variables.items()
+ ]))
return nodes.Output([node])
class ExprStmtExtension(Extension):
- """Adds a `do` tag to Jinja that works like the print statement just
+ """Adds a `do` tag to Jinja2 that works like the print statement just
that it doesn't print the return value.
"""
-
- tags = set(["do"])
+ tags = set(['do'])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
@@ -449,12 +417,11 @@ def parse(self, parser):
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
-
- tags = set(["break", "continue"])
+ tags = set(['break', 'continue'])
def parse(self, parser):
token = next(parser.stream)
- if token.value == "break":
+ if token.value == 'break':
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
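
The Break/Continue nodes returned above only take effect when the extension is enabled on the environment; a minimal sketch (the template text is illustrative):

    from jinja2 import Environment

    env = Environment(extensions=["jinja2.ext.loopcontrols"])
    tmpl = env.from_string(
        "{% for i in range(5) %}{% if i == 2 %}{% break %}{% endif %}{{ i }}{% endfor %}"
    )
    print(tmpl.render())  # '01'
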
@@ -467,50 +434,8 @@ class AutoEscapeExtension(Extension):
pass
-class DebugExtension(Extension):
- """A ``{% debug %}`` tag that dumps the available variables,
- filters, and tests.
-
- .. code-block:: html+jinja
-
- {% debug %}
-
- .. code-block:: text
-
- {'context': {'cycler': <class 'jinja2.utils.Cycler'>,
- ...,
- 'namespace': <class 'jinja2.utils.Namespace'>},
- 'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
- ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
- 'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
- ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
-
- .. versionadded:: 2.11.0
- """
-
- tags = {"debug"}
-
- def parse(self, parser):
- lineno = parser.stream.expect("name:debug").lineno
- context = ContextReference()
- result = self.call_method("_render", [context], lineno=lineno)
- return nodes.Output([result], lineno=lineno)
-
- def _render(self, context):
- result = {
- "context": context.get_all(),
- "filters": sorted(self.environment.filters.keys()),
- "tests": sorted(self.environment.tests.keys()),
- }
-
- # Set the depth since the intent is to show the top few names.
- if version_info[:2] >= (3, 4):
- return pprint.pformat(result, depth=3, compact=True)
- else:
- return pprint.pformat(result, depth=3)
-
-
-def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True):
+def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
+ babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
@@ -546,20 +471,19 @@ def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
- if (
- not isinstance(node.node, nodes.Name)
- or node.node.name not in gettext_functions
- ):
+ if not isinstance(node.node, nodes.Name) or \
+ node.node.name not in gettext_functions:
continue
strings = []
for arg in node.args:
- if isinstance(arg, nodes.Const) and isinstance(arg.value, string_types):
+ if isinstance(arg, nodes.Const) and \
+ isinstance(arg.value, string_types):
strings.append(arg.value)
else:
strings.append(None)
- for _ in node.kwargs:
+ for arg in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
@@ -593,10 +517,9 @@ def __init__(self, tokens, comment_tags):
def find_backwards(self, offset):
try:
- for _, token_type, token_value in reversed(
- self.tokens[self.offset : offset]
- ):
- if token_type in ("comment", "linecomment"):
+ for _, token_type, token_value in \
+ reversed(self.tokens[self.offset:offset]):
+ if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
@@ -610,7 +533,7 @@ def find_backwards(self, offset):
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
- for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
+ for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
@@ -622,7 +545,7 @@ def babel_extract(fileobj, keywords, comment_tags, options):
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
- try to find the best preceding comment that begins with one of the
+ try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
@@ -645,7 +568,7 @@ def babel_extract(fileobj, keywords, comment_tags, options):
(comments will be empty currently)
"""
extensions = set()
- for extension in options.get("extensions", "").split(","):
+ for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
@@ -654,37 +577,38 @@ def babel_extract(fileobj, keywords, comment_tags, options):
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
- return options.get(key, str(default)).lower() in ("1", "on", "yes", "true")
+ return options.get(key, str(default)).lower() in \
+ ('1', 'on', 'yes', 'true')
- silent = getbool(options, "silent", True)
+ silent = getbool(options, 'silent', True)
environment = Environment(
- options.get("block_start_string", BLOCK_START_STRING),
- options.get("block_end_string", BLOCK_END_STRING),
- options.get("variable_start_string", VARIABLE_START_STRING),
- options.get("variable_end_string", VARIABLE_END_STRING),
- options.get("comment_start_string", COMMENT_START_STRING),
- options.get("comment_end_string", COMMENT_END_STRING),
- options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX,
- options.get("line_comment_prefix") or LINE_COMMENT_PREFIX,
- getbool(options, "trim_blocks", TRIM_BLOCKS),
- getbool(options, "lstrip_blocks", LSTRIP_BLOCKS),
+ options.get('block_start_string', BLOCK_START_STRING),
+ options.get('block_end_string', BLOCK_END_STRING),
+ options.get('variable_start_string', VARIABLE_START_STRING),
+ options.get('variable_end_string', VARIABLE_END_STRING),
+ options.get('comment_start_string', COMMENT_START_STRING),
+ options.get('comment_end_string', COMMENT_END_STRING),
+ options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
+ options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
+ getbool(options, 'trim_blocks', TRIM_BLOCKS),
+ getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
NEWLINE_SEQUENCE,
- getbool(options, "keep_trailing_newline", KEEP_TRAILING_NEWLINE),
+ getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
frozenset(extensions),
cache_size=0,
- auto_reload=False,
+ auto_reload=False
)
- if getbool(options, "trimmed"):
- environment.policies["ext.i18n.trimmed"] = True
- if getbool(options, "newstyle_gettext"):
+ if getbool(options, 'trimmed'):
+ environment.policies['ext.i18n.trimmed'] = True
+ if getbool(options, 'newstyle_gettext'):
environment.newstyle_gettext = True
- source = fileobj.read().decode(options.get("encoding", "utf-8"))
+ source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
- except TemplateSyntaxError:
+ except TemplateSyntaxError as e:
if not silent:
raise
# skip templates with syntax errors
@@ -701,4 +625,3 @@ def getbool(options, key, default=False):
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
-debug = DebugExtension
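
The i18n machinery reverted in this file is driven from the environment; a minimal sketch using the null translations installed by _install_null above (the template string is illustrative):

    from jinja2 import Environment

    env = Environment(extensions=["jinja2.ext.i18n"])
    env.install_null_translations()
    tmpl = env.from_string(
        "{% trans count=n %}{{ count }} item{% pluralize %}{{ count }} items{% endtrans %}"
    )
    print(tmpl.render(n=1), "/", tmpl.render(n=3))  # 1 item / 3 items
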
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/filters.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/filters.py
old mode 100644
new mode 100755
index 74b108dc..267dddda
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/filters.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/filters.py
@@ -1,31 +1,29 @@
# -*- coding: utf-8 -*-
-"""Built-in template filters used with the ``|`` operator."""
+"""
+ jinja2.filters
+ ~~~~~~~~~~~~~~
+
+ Bundled jinja filters.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import re
import math
import random
-import re
import warnings
-from collections import namedtuple
-from itertools import chain
-from itertools import groupby
-from markupsafe import escape
-from markupsafe import Markup
-from markupsafe import soft_unicode
+from itertools import groupby, chain
+from collections import namedtuple
+from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
+ unicode_urlencode, htmlsafe_json_dumps
+from jinja2.runtime import Undefined
+from jinja2.exceptions import FilterArgumentError
+from jinja2._compat import imap, string_types, text_type, iteritems, PY2
-from ._compat import abc
-from ._compat import imap
-from ._compat import iteritems
-from ._compat import string_types
-from ._compat import text_type
-from .exceptions import FilterArgumentError
-from .runtime import Undefined
-from .utils import htmlsafe_json_dumps
-from .utils import pformat
-from .utils import unicode_urlencode
-from .utils import urlize
-_word_re = re.compile(r"\w+", re.UNICODE)
-_word_beginning_split_re = re.compile(r"([-\s\(\{\[\<]+)", re.UNICODE)
+_word_re = re.compile(r'\w+', re.UNICODE)
+_word_beginning_split_re = re.compile(r'([-\s\(\{\[\<]+)', re.UNICODE)
def contextfilter(f):
@@ -61,21 +59,23 @@ def ignore_case(value):
return value.lower() if isinstance(value, string_types) else value
-def make_attrgetter(environment, attribute, postprocess=None, default=None):
+def make_attrgetter(environment, attribute, postprocess=None):
"""Returns a callable that looks up the given attribute from a
passed object with the rules of the environment. Dots are allowed
to access attributes of attributes. Integer parts in paths are
looked up as integers.
"""
- attribute = _prepare_attribute_parts(attribute)
+ if attribute is None:
+ attribute = []
+ elif isinstance(attribute, string_types):
+ attribute = [int(x) if x.isdigit() else x for x in attribute.split('.')]
+ else:
+ attribute = [attribute]
def attrgetter(item):
for part in attribute:
item = environment.getitem(item, part)
- if default and isinstance(item, Undefined):
- item = default
-
if postprocess is not None:
item = postprocess(item)
@@ -84,84 +84,32 @@ def attrgetter(item):
return attrgetter
-def make_multi_attrgetter(environment, attribute, postprocess=None):
- """Returns a callable that looks up the given comma separated
- attributes from a passed object with the rules of the environment.
- Dots are allowed to access attributes of each attribute. Integer
- parts in paths are looked up as integers.
-
- The value returned by the returned callable is a list of extracted
- attribute values.
-
- Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc.
- """
- attribute_parts = (
- attribute.split(",") if isinstance(attribute, string_types) else [attribute]
- )
- attribute = [
- _prepare_attribute_parts(attribute_part) for attribute_part in attribute_parts
- ]
-
- def attrgetter(item):
- items = [None] * len(attribute)
- for i, attribute_part in enumerate(attribute):
- item_i = item
- for part in attribute_part:
- item_i = environment.getitem(item_i, part)
-
- if postprocess is not None:
- item_i = postprocess(item_i)
-
- items[i] = item_i
- return items
-
- return attrgetter
-
-
-def _prepare_attribute_parts(attr):
- if attr is None:
- return []
- elif isinstance(attr, string_types):
- return [int(x) if x.isdigit() else x for x in attr.split(".")]
- else:
- return [attr]
-
-
def do_forceescape(value):
"""Enforce HTML escaping. This will probably double escape variables."""
- if hasattr(value, "__html__"):
+ if hasattr(value, '__html__'):
value = value.__html__()
return escape(text_type(value))
def do_urlencode(value):
- """Quote data for use in a URL path or query using UTF-8.
-
- Basic wrapper around :func:`urllib.parse.quote` when given a
- string, or :func:`urllib.parse.urlencode` for a dict or iterable.
-
- :param value: Data to quote. A string will be quoted directly. A
- dict or iterable of ``(key, value)`` pairs will be joined as a
- query string.
-
- When given a string, "/" is not quoted. HTTP servers treat "/" and
- "%2F" equivalently in paths. If you need quoted slashes, use the
- ``|replace("/", "%2F")`` filter.
+ """Escape strings for use in URLs (uses UTF-8 encoding). It accepts both
+ dictionaries and regular strings as well as pairwise iterables.
.. versionadded:: 2.7
"""
- if isinstance(value, string_types) or not isinstance(value, abc.Iterable):
- return unicode_urlencode(value)
-
+ itemiter = None
if isinstance(value, dict):
- items = iteritems(value)
- else:
- items = iter(value)
-
- return u"&".join(
- "%s=%s" % (unicode_urlencode(k, for_qs=True), unicode_urlencode(v, for_qs=True))
- for k, v in items
- )
+ itemiter = iteritems(value)
+ elif not isinstance(value, string_types):
+ try:
+ itemiter = iter(value)
+ except TypeError:
+ pass
+ if itemiter is None:
+ return unicode_urlencode(value)
+ return u'&'.join(unicode_urlencode(k) + '=' +
+ unicode_urlencode(v, for_qs=True)
+ for k, v in itemiter)
@evalcontextfilter
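
The reverted do_urlencode above accepts plain strings, dicts, and pairwise iterables; a minimal sketch (the values are illustrative):

    from jinja2 import Template

    print(Template("{{ 'path with spaces'|urlencode }}").render())  # 'path%20with%20spaces'
    print(Template("{{ {'q': 'jinja 2'}|urlencode }}").render())    # 'q=jinja%202'
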
@@ -184,11 +132,8 @@ def do_replace(eval_ctx, s, old, new, count=None):
count = -1
if not eval_ctx.autoescape:
return text_type(s).replace(text_type(old), text_type(new), count)
- if (
- hasattr(old, "__html__")
- or hasattr(new, "__html__")
- and not hasattr(s, "__html__")
- ):
+ if hasattr(old, '__html__') or hasattr(new, '__html__') and \
+ not hasattr(s, '__html__'):
s = escape(s)
else:
s = soft_unicode(s)
@@ -229,13 +174,13 @@ def do_xmlattr(_eval_ctx, d, autospace=True):
As you can see it automatically prepends a space in front of the item
if the filter returned something unless the second parameter is false.
"""
- rv = u" ".join(
+ rv = u' '.join(
u'%s="%s"' % (escape(key), escape(value))
for key, value in iteritems(d)
if value is not None and not isinstance(value, Undefined)
)
if autospace and rv:
- rv = u" " + rv
+ rv = u' ' + rv
if _eval_ctx.autoescape:
rv = Markup(rv)
return rv
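
do_xmlattr renders a mapping as XML attributes, skipping None and Undefined values; a minimal sketch (the attribute names are illustrative):

    from jinja2 import Environment

    env = Environment(autoescape=True)
    tmpl = env.from_string("<ul{{ {'class': 'my-list', 'id': none}|xmlattr }}>")
    print(tmpl.render())  # '<ul class="my-list">'
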
@@ -252,40 +197,39 @@ def do_title(s):
"""Return a titlecased version of the value. I.e. words will start with
uppercase letters, all remaining characters are lowercase.
"""
- return "".join(
- [
- item[0].upper() + item[1:].lower()
- for item in _word_beginning_split_re.split(soft_unicode(s))
- if item
- ]
- )
+ return ''.join(
+ [item[0].upper() + item[1:].lower()
+ for item in _word_beginning_split_re.split(soft_unicode(s))
+ if item])
-def do_dictsort(value, case_sensitive=False, by="key", reverse=False):
+def do_dictsort(value, case_sensitive=False, by='key', reverse=False):
"""Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
- {% for key, value in mydict|dictsort %}
+ {% for item in mydict|dictsort %}
sort the dict by key, case insensitive
- {% for key, value in mydict|dictsort(reverse=true) %}
+ {% for item in mydict|dictsort(reverse=true) %}
sort the dict by key, case insensitive, reverse order
- {% for key, value in mydict|dictsort(true) %}
+ {% for item in mydict|dictsort(true) %}
sort the dict by key, case sensitive
- {% for key, value in mydict|dictsort(false, 'value') %}
+ {% for item in mydict|dictsort(false, 'value') %}
sort the dict by value, case insensitive
"""
- if by == "key":
+ if by == 'key':
pos = 0
- elif by == "value":
+ elif by == 'value':
pos = 1
else:
- raise FilterArgumentError('You can only sort by either "key" or "value"')
+ raise FilterArgumentError(
+ 'You can only sort by either "key" or "value"'
+ )
def sort_func(item):
value = item[pos]
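
A minimal sketch of the dictsort behaviour documented above (the dict literal is illustrative):

    from jinja2 import Template

    print(Template("{{ {'b': 1, 'a': 2}|dictsort }}").render())
    # [('a', 2), ('b', 1)]
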
@@ -299,62 +243,48 @@ def sort_func(item):
@environmentfilter
-def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None):
- """Sort an iterable using Python's :func:`sorted`.
-
- .. sourcecode:: jinja
-
- {% for city in cities|sort %}
- ...
- {% endfor %}
-
- :param reverse: Sort descending instead of ascending.
- :param case_sensitive: When sorting strings, sort upper and lower
- case separately.
- :param attribute: When sorting objects or dicts, an attribute or
- key to sort by. Can use dot notation like ``"address.city"``.
- Can be a list of attributes like ``"age,name"``.
+def do_sort(
+ environment, value, reverse=False, case_sensitive=False, attribute=None
+):
+ """Sort an iterable. Per default it sorts ascending, if you pass it
+ true as first argument it will reverse the sorting.
- The sort is stable, it does not change the relative order of
- elements that compare equal. This makes it is possible to chain
- sorts on different attributes and ordering.
+ If the iterable is made of strings, the third parameter can be used to
+ control the case sensitivity of the comparison, which is disabled by
+ default.
.. sourcecode:: jinja
- {% for user in users|sort(attribute="name")
- |sort(reverse=true, attribute="age") %}
+ {% for item in iterable|sort %}
...
{% endfor %}
- As a shortcut to chaining when the direction is the same for all
- attributes, pass a comma separate list of attributes.
+ It is also possible to sort by an attribute (for example to sort
+ by the date of an object) by specifying the `attribute` parameter:
.. sourcecode:: jinja
- {% for user users|sort(attribute="age,name") %}
+ {% for item in iterable|sort(attribute='date') %}
...
{% endfor %}
- .. versionchanged:: 2.11.0
- The ``attribute`` parameter can be a comma separated list of
- attributes, e.g. ``"age,name"``.
-
.. versionchanged:: 2.6
- The ``attribute`` parameter was added.
+ The `attribute` parameter was added.
"""
- key_func = make_multi_attrgetter(
- environment, attribute, postprocess=ignore_case if not case_sensitive else None
+ key_func = make_attrgetter(
+ environment, attribute,
+ postprocess=ignore_case if not case_sensitive else None
)
return sorted(value, key=key_func, reverse=reverse)
@environmentfilter
def do_unique(environment, value, case_sensitive=False, attribute=None):
- """Returns a list of unique items from the given iterable.
+ """Returns a list of unique items from the the given iterable.
.. sourcecode:: jinja
- {{ ['foo', 'bar', 'foobar', 'FooBar']|unique|list }}
+ {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
-> ['foo', 'bar', 'foobar']
The unique items are yielded in the same order as their first occurrence in
@@ -364,7 +294,8 @@ def do_unique(environment, value, case_sensitive=False, attribute=None):
:param attribute: Filter objects with unique values for this attribute.
"""
getter = make_attrgetter(
- environment, attribute, postprocess=ignore_case if not case_sensitive else None
+ environment, attribute,
+ postprocess=ignore_case if not case_sensitive else None
)
seen = set()
@@ -382,10 +313,11 @@ def _min_or_max(environment, value, func, case_sensitive, attribute):
try:
first = next(it)
except StopIteration:
- return environment.undefined("No aggregated item, sequence was empty.")
+ return environment.undefined('No aggregated item, sequence was empty.')
key_func = make_attrgetter(
- environment, attribute, postprocess=ignore_case if not case_sensitive else None
+ environment, attribute,
+ ignore_case if not case_sensitive else None
)
return func(chain([first], it), key=key_func)
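
_min_or_max backs the min and max filters below; a minimal sketch with an illustrative attribute lookup:

    from jinja2 import Template

    print(Template("{{ users|max(attribute='age') }}").render(
        users=[{"age": 30}, {"age": 41}]))  # {'age': 41}
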
@@ -400,7 +332,7 @@ def do_min(environment, value, case_sensitive=False, attribute=None):
-> 1
:param case_sensitive: Treat upper and lower case strings as distinct.
- :param attribute: Get the object with the min value of this attribute.
+ :param attribute: Get the object with the min value of this attribute.
"""
return _min_or_max(environment, value, min, case_sensitive, attribute)
@@ -420,7 +352,7 @@ def do_max(environment, value, case_sensitive=False, attribute=None):
return _min_or_max(environment, value, max, case_sensitive, attribute)
-def do_default(value, default_value=u"", boolean=False):
+def do_default(value, default_value=u'', boolean=False):
"""If the value is undefined it will return the passed default value,
otherwise the value of the variable:
@@ -436,12 +368,6 @@ def do_default(value, default_value=u"", boolean=False):
.. sourcecode:: jinja
{{ ''|default('the string was empty', true) }}
-
- .. versionchanged:: 2.11
- It's now possible to configure the :class:`~jinja2.Environment` with
- :class:`~jinja2.ChainableUndefined` to make the `default` filter work
- on nested elements and attributes that may contain undefined values
- in the chain without getting an :exc:`~jinja2.UndefinedError`.
"""
if isinstance(value, Undefined) or (boolean and not value):
return default_value
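
A minimal sketch of the two do_default modes above, the undefined check and the boolean falsiness check (the values are illustrative):

    from jinja2 import Template

    print(Template("{{ x|default('fallback') }}").render())          # 'fallback'
    print(Template("{{ ''|default('was empty', true) }}").render())  # 'was empty'
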
@@ -449,7 +375,7 @@ def do_default(value, default_value=u"", boolean=False):
@evalcontextfilter
-def do_join(eval_ctx, value, d=u"", attribute=None):
+def do_join(eval_ctx, value, d=u'', attribute=None):
"""Return a string which is the concatenation of the strings in the
sequence. The separator between elements is an empty string per
default, you can define it with the optional parameter:
@@ -474,17 +400,17 @@ def do_join(eval_ctx, value, d=u"", attribute=None):
if attribute is not None:
value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
- # no automatic escaping? joining is a lot easier then
+ # no automatic escaping? joining is a lot easier then
if not eval_ctx.autoescape:
return text_type(d).join(imap(text_type, value))
# if the delimiter doesn't have an html representation we check
# if any of the items has. If yes we do a coercion to Markup
- if not hasattr(d, "__html__"):
+ if not hasattr(d, '__html__'):
value = list(value)
do_escape = False
for idx, item in enumerate(value):
- if hasattr(item, "__html__"):
+ if hasattr(item, '__html__'):
do_escape = True
else:
value[idx] = text_type(item)
@@ -509,25 +435,16 @@ def do_first(environment, seq):
try:
return next(iter(seq))
except StopIteration:
- return environment.undefined("No first item, sequence was empty.")
+ return environment.undefined('No first item, sequence was empty.')
@environmentfilter
def do_last(environment, seq):
- """
- Return the last item of a sequence.
-
- Note: Does not work with generators. You may want to explicitly
- convert it to a list:
-
- .. sourcecode:: jinja
-
- {{ data | selectattr('name', '==', 'Jinja') | list | last }}
- """
+ """Return the last item of a sequence."""
try:
return next(iter(reversed(seq)))
except StopIteration:
- return environment.undefined("No last item, sequence was empty.")
+ return environment.undefined('No last item, sequence was empty.')
@contextfilter
@@ -536,7 +453,7 @@ def do_random(context, seq):
try:
return random.choice(seq)
except IndexError:
- return context.environment.undefined("No random item, sequence was empty.")
+ return context.environment.undefined('No random item, sequence was empty.')
def do_filesizeformat(value, binary=False):
@@ -548,25 +465,25 @@ def do_filesizeformat(value, binary=False):
bytes = float(value)
base = binary and 1024 or 1000
prefixes = [
- (binary and "KiB" or "kB"),
- (binary and "MiB" or "MB"),
- (binary and "GiB" or "GB"),
- (binary and "TiB" or "TB"),
- (binary and "PiB" or "PB"),
- (binary and "EiB" or "EB"),
- (binary and "ZiB" or "ZB"),
- (binary and "YiB" or "YB"),
+ (binary and 'KiB' or 'kB'),
+ (binary and 'MiB' or 'MB'),
+ (binary and 'GiB' or 'GB'),
+ (binary and 'TiB' or 'TB'),
+ (binary and 'PiB' or 'PB'),
+ (binary and 'EiB' or 'EB'),
+ (binary and 'ZiB' or 'ZB'),
+ (binary and 'YiB' or 'YB')
]
if bytes == 1:
- return "1 Byte"
+ return '1 Byte'
elif bytes < base:
- return "%d Bytes" % bytes
+ return '%d Bytes' % bytes
else:
for i, prefix in enumerate(prefixes):
unit = base ** (i + 2)
if bytes < unit:
- return "%.1f %s" % ((base * bytes / unit), prefix)
- return "%.1f %s" % ((base * bytes / unit), prefix)
+ return '%.1f %s' % ((base * bytes / unit), prefix)
+ return '%.1f %s' % ((base * bytes / unit), prefix)
def do_pprint(value, verbose=False):
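
A worked check of the filesizeformat arithmetic in the hunk above (illustrative values):

    from jinja2 import Template

    print(Template("{{ 1000|filesizeformat }}").render())        # '1.0 kB' (decimal base 1000)
    print(Template("{{ 1000|filesizeformat(true) }}").render())  # '1000 Bytes' (binary base 1024)
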
@@ -579,9 +496,8 @@ def do_pprint(value, verbose=False):
@evalcontextfilter
-def do_urlize(
- eval_ctx, value, trim_url_limit=None, nofollow=False, target=None, rel=None
-):
+def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
+ target=None, rel=None):
"""Converts URLs in plain text into clickable links.
If you pass the filter an additional integer it will shorten the urls
@@ -604,20 +520,22 @@ def do_urlize(
The *target* parameter was added.
"""
policies = eval_ctx.environment.policies
- rel = set((rel or "").split() or [])
+ rel = set((rel or '').split() or [])
if nofollow:
- rel.add("nofollow")
- rel.update((policies["urlize.rel"] or "").split())
+ rel.add('nofollow')
+ rel.update((policies['urlize.rel'] or '').split())
if target is None:
- target = policies["urlize.target"]
- rel = " ".join(sorted(rel)) or None
+ target = policies['urlize.target']
+ rel = ' '.join(sorted(rel)) or None
rv = urlize(value, trim_url_limit, rel=rel, target=target)
if eval_ctx.autoescape:
rv = Markup(rv)
return rv
-def do_indent(s, width=4, first=False, blank=False, indentfirst=None):
+def do_indent(
+ s, width=4, first=False, blank=False, indentfirst=None
+):
"""Return a copy of the string with each line indented by 4 spaces. The
first line and blank lines are not indented by default.
@@ -631,31 +549,22 @@ def do_indent(s, width=4, first=False, blank=False, indentfirst=None):
Rename the ``indentfirst`` argument to ``first``.
"""
if indentfirst is not None:
- warnings.warn(
- "The 'indentfirst' argument is renamed to 'first' and will"
- " be removed in version 3.0.",
- DeprecationWarning,
- stacklevel=2,
- )
+ warnings.warn(DeprecationWarning(
+ 'The "indentfirst" argument is renamed to "first".'
+ ), stacklevel=2)
first = indentfirst
- indention = u" " * width
- newline = u"\n"
-
- if isinstance(s, Markup):
- indention = Markup(indention)
- newline = Markup(newline)
-
- s += newline # this quirk is necessary for splitlines method
+ s += u'\n' # this quirk is necessary for splitlines method
+ indention = u' ' * width
if blank:
- rv = (newline + indention).join(s.splitlines())
+ rv = (u'\n' + indention).join(s.splitlines())
else:
lines = s.splitlines()
rv = lines.pop(0)
if lines:
- rv += newline + newline.join(
+ rv += u'\n' + u'\n'.join(
indention + line if line else line for line in lines
)
@@ -666,7 +575,7 @@ def do_indent(s, width=4, first=False, blank=False, indentfirst=None):
@environmentfilter
-def do_truncate(env, s, length=255, killwords=False, end="...", leeway=None):
+def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None):
"""Return a truncated copy of the string. The length is specified
with the first parameter which defaults to ``255``. If the second
parameter is ``true`` the filter will cut the text at length. Otherwise
@@ -687,81 +596,46 @@ def do_truncate(env, s, length=255, killwords=False, end="...", leeway=None):
{{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
-> "foo bar..."
- The default leeway on newer Jinja versions is 5 and was 0 before but
+ The default leeway on newer Jinja2 versions is 5 and was 0 before but
can be reconfigured globally.
"""
if leeway is None:
- leeway = env.policies["truncate.leeway"]
- assert length >= len(end), "expected length >= %s, got %s" % (len(end), length)
- assert leeway >= 0, "expected leeway >= 0, got %s" % leeway
+ leeway = env.policies['truncate.leeway']
+ assert length >= len(end), 'expected length >= %s, got %s' % (len(end), length)
+ assert leeway >= 0, 'expected leeway >= 0, got %s' % leeway
if len(s) <= length + leeway:
return s
if killwords:
- return s[: length - len(end)] + end
- result = s[: length - len(end)].rsplit(" ", 1)[0]
+ return s[:length - len(end)] + end
+ result = s[:length - len(end)].rsplit(' ', 1)[0]
return result + end
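
A worked check of the truncate length/leeway rule above, matching the docstring examples:

    from jinja2 import Template

    # len('foo bar baz qux') == 15 <= 11 + default leeway 5, so it is returned whole:
    print(Template("{{ 'foo bar baz qux'|truncate(11) }}").render())
    # with leeway 0 the string is cut at a word boundary and the end string appended:
    print(Template("{{ 'foo bar baz qux'|truncate(11, false, '...', 0) }}").render())  # 'foo bar...'
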
@environmentfilter
-def do_wordwrap(
- environment,
- s,
- width=79,
- break_long_words=True,
- wrapstring=None,
- break_on_hyphens=True,
-):
- """Wrap a string to the given width. Existing newlines are treated
- as paragraphs to be wrapped separately.
-
- :param s: Original text to wrap.
- :param width: Maximum length of wrapped lines.
- :param break_long_words: If a word is longer than ``width``, break
- it across lines.
- :param break_on_hyphens: If a word contains hyphens, it may be split
- across lines.
- :param wrapstring: String to join each wrapped line. Defaults to
- :attr:`Environment.newline_sequence`.
-
- .. versionchanged:: 2.11
- Existing newlines are treated as paragraphs wrapped separately.
-
- .. versionchanged:: 2.11
- Added the ``break_on_hyphens`` parameter.
-
- .. versionchanged:: 2.7
- Added the ``wrapstring`` parameter.
+def do_wordwrap(environment, s, width=79, break_long_words=True,
+ wrapstring=None):
"""
+ Return a copy of the string passed to the filter wrapped after
+ ``79`` characters. You can override this default using the first
+ parameter. If you set the second parameter to `false` Jinja will not
+ split words apart if they are longer than `width`. By default, the newlines
+ will be the default newlines for the environment, but this can be changed
+ using the wrapstring keyword argument.
- import textwrap
-
+ .. versionadded:: 2.7
+ Added support for the `wrapstring` parameter.
+ """
if not wrapstring:
wrapstring = environment.newline_sequence
-
- # textwrap.wrap doesn't consider existing newlines when wrapping.
- # If the string has a newline before width, wrap will still insert
- # a newline at width, resulting in a short line. Instead, split and
- # wrap each paragraph individually.
- return wrapstring.join(
- [
- wrapstring.join(
- textwrap.wrap(
- line,
- width=width,
- expand_tabs=False,
- replace_whitespace=False,
- break_long_words=break_long_words,
- break_on_hyphens=break_on_hyphens,
- )
- )
- for line in s.splitlines()
- ]
- )
+ import textwrap
+ return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
+ replace_whitespace=False,
+ break_long_words=break_long_words))
def do_wordcount(s):
"""Count the words in that string."""
- return len(_word_re.findall(soft_unicode(s)))
+ return len(_word_re.findall(s))
def do_int(value, default=0, base=10):
@@ -797,40 +671,29 @@ def do_float(value, default=0.0):
def do_format(value, *args, **kwargs):
- """Apply the given values to a `printf-style`_ format string, like
- ``string % values``.
+ """
+ Apply python string formatting on an object:
.. sourcecode:: jinja
- {{ "%s, %s!"|format(greeting, name) }}
- Hello, World!
-
- In most cases it should be more convenient and efficient to use the
- ``%`` operator or :meth:`str.format`.
-
- .. code-block:: text
-
- {{ "%s, %s!" % (greeting, name) }}
- {{ "{}, {}!".format(greeting, name) }}
-
- .. _printf-style: https://docs.python.org/library/stdtypes.html
- #printf-style-string-formatting
+ {{ "%s - %s"|format("Hello?", "Foo!") }}
+ -> Hello? - Foo!
"""
if args and kwargs:
- raise FilterArgumentError(
- "can't handle positional and keyword arguments at the same time"
- )
+ raise FilterArgumentError('can\'t handle positional and keyword '
+ 'arguments at the same time')
return soft_unicode(value) % (kwargs or args)
-def do_trim(value, chars=None):
- """Strip leading and trailing characters, by default whitespace."""
- return soft_unicode(value).strip(chars)
+def do_trim(value):
+ """Strip leading and trailing whitespace."""
+ return soft_unicode(value).strip()
def do_striptags(value):
- """Strip SGML/XML tags and replace adjacent whitespace by one space."""
- if hasattr(value, "__html__"):
+ """Strip SGML/XML tags and replace adjacent whitespace by one space.
+ """
+ if hasattr(value, '__html__'):
value = value.__html__()
return Markup(text_type(value)).striptags()
@@ -842,7 +705,7 @@ def do_slice(value, slices, fill_with=None):
.. sourcecode:: html+jinja
- <div class="columnwrapper">
+ <div class="columwrapper">
{%- for column in items|slice(3) %}
{%- for item in column %}
@@ -902,7 +765,7 @@ def do_batch(value, linecount, fill_with=None):
yield tmp
-def do_round(value, precision=0, method="common"):
+def do_round(value, precision=0, method='common'):
"""Round the number to a given precision. The first
parameter specifies the precision (default is ``0``), the
second the rounding method:
@@ -928,9 +791,9 @@ def do_round(value, precision=0, method="common"):
{{ 42.55|round|int }}
-> 43
"""
- if method not in {"common", "ceil", "floor"}:
- raise FilterArgumentError("method must be common, ceil or floor")
- if method == "common":
+ if not method in ('common', 'ceil', 'floor'):
+ raise FilterArgumentError('method must be common, ceil or floor')
+ if method == 'common':
return round(value, precision)
func = getattr(math, method)
return func(value * (10 ** precision)) / (10 ** precision)
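
A worked check of the rounding methods above (illustrative values):

    from jinja2 import Template

    print(Template("{{ 42.55|round }}").render())              # 43.0
    print(Template("{{ 42.55|round(1, 'floor') }}").render())  # 42.5
    print(Template("{{ 42.55|round|int }}").render())          # 43
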
@@ -941,51 +804,52 @@ def do_round(value, precision=0, method="common"):
# we do not want to accidentally expose an auto generated repr in case
# people start to print this out in comments or something similar for
# debugging.
-_GroupTuple = namedtuple("_GroupTuple", ["grouper", "list"])
+_GroupTuple = namedtuple('_GroupTuple', ['grouper', 'list'])
_GroupTuple.__repr__ = tuple.__repr__
_GroupTuple.__str__ = tuple.__str__
-
@environmentfilter
def do_groupby(environment, value, attribute):
- """Group a sequence of objects by an attribute using Python's
- :func:`itertools.groupby`. The attribute can use dot notation for
- nested access, like ``"address.city"``. Unlike Python's ``groupby``,
- the values are sorted first so only one group is returned for each
- unique value.
+ """Group a sequence of objects by a common attribute.
- For example, a list of ``User`` objects with a ``city`` attribute
- can be rendered in groups. In this example, ``grouper`` refers to
- the ``city`` value of the group.
+ If, for example, you have a list of dicts or objects that represent persons
+ with `gender`, `first_name` and `last_name` attributes and you want to
+ group all persons by gender, you can do something like the following
+ snippet:
.. sourcecode:: html+jinja
- <ul>{% for city, items in users|groupby("city") %}
- <li>{{ city }}
- <ul>{% for user in items %}
- <li>{{ user.name }}
- {% endfor %}</ul>
- </li>
- {% endfor %}</ul>
+ <ul>
+ {% for group in persons|groupby('gender') %}
+ <li>{{ group.grouper }}<ul>
+ {% for person in group.list %}
+ <li>{{ person.first_name }} {{ person.last_name }}</li>
+ {% endfor %}</ul></li>
+ {% endfor %}
+ </ul>
- ``groupby`` yields namedtuples of ``(grouper, list)``, which
- can be used instead of the tuple unpacking above. ``grouper`` is the
- value of the attribute, and ``list`` is the items with that value.
+ Additionally it's possible to use tuple unpacking for the grouper and
+ list:
.. sourcecode:: html+jinja
- <ul>{% for group in users|groupby("city") %}
- <li>{{ group.grouper }}: {{ group.list|join(", ") }}
- {% endfor %}</ul>
+ <ul>
+ {% for grouper, list in persons|groupby('gender') %}
+ ...
+ {% endfor %}
+ </ul>
+
+ As you can see the item we're grouping by is stored in the `grouper`
+ attribute and the `list` contains all the objects that have this grouper
+ in common.
.. versionchanged:: 2.6
- The attribute supports dot notation for nested access.
+ It's now possible to use dotted notation to group by the child
+ attribute of another attribute.
"""
expr = make_attrgetter(environment, attribute)
- return [
- _GroupTuple(key, list(values))
- for key, values in groupby(sorted(value, key=expr), expr)
- ]
+ return [_GroupTuple(key, list(values)) for key, values
+ in groupby(sorted(value, key=expr), expr)]
@environmentfilter
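
A minimal sketch of the grouping described in the groupby hunk above (the person dicts are illustrative):

    from jinja2 import Template

    tmpl = Template(
        "{% for group in people|groupby('city') %}"
        "{{ group.grouper }}: {{ group.list|length }} "
        "{% endfor %}"
    )
    print(tmpl.render(people=[{"city": "Oslo"}, {"city": "Bern"}, {"city": "Oslo"}]))
    # 'Bern: 1 Oslo: 2 ' (groups come out sorted by the grouping attribute)
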
@@ -1042,7 +906,7 @@ def do_reverse(value):
rv.reverse()
return rv
except TypeError:
- raise FilterArgumentError("argument must be iterable")
+ raise FilterArgumentError('argument must be iterable')
@environmentfilter
@@ -1063,9 +927,8 @@ def do_attr(environment, obj, name):
except AttributeError:
pass
else:
- if environment.sandboxed and not environment.is_safe_attribute(
- obj, name, value
- ):
+ if environment.sandboxed and not \
+ environment.is_safe_attribute(obj, name, value):
return environment.unsafe_undefined(obj, name)
return value
return environment.undefined(obj=obj, name=name)
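
do_attr performs attribute (not item) lookup; a minimal sketch with an illustrative namedtuple:

    from collections import namedtuple
    from jinja2 import Environment

    Point = namedtuple("Point", "x y")
    print(Environment().from_string("{{ p|attr('x') }}").render(p=Point(1, 2)))  # '1'
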
@@ -1084,13 +947,6 @@ def do_map(*args, **kwargs):
Users on this page: {{ users|map(attribute='username')|join(', ') }}
- You can specify a ``default`` value to use if an object in the list
- does not have the given attribute.
-
- .. sourcecode:: jinja
-
- {{ users|map(attribute="username", default="Anonymous")|join(", ") }}
-
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
@@ -1099,17 +955,6 @@ def do_map(*args, **kwargs):
Users on this page: {{ titles|map('lower')|join(', ') }}
- Similar to a generator comprehension such as:
-
- .. code-block:: python
-
- (u.username for u in users)
- (u.username or "Anonymous" for u in users)
- (do_lower(x) for x in titles)
-
- .. versionchanged:: 2.11.0
- Added the ``default`` parameter.
-
.. versionadded:: 2.7
"""
seq, func = prepare_map(args, kwargs)
@@ -1135,13 +980,6 @@ def do_select(*args, **kwargs):
{{ numbers|select("lessthan", 42) }}
{{ strings|select("equalto", "mystring") }}
- Similar to a generator comprehension such as:
-
- .. code-block:: python
-
- (n for n in numbers if test_odd(n))
- (n for n in numbers if test_divisibleby(n, 3))
-
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: x, False)
@@ -1160,12 +998,6 @@ def do_reject(*args, **kwargs):
{{ numbers|reject("odd") }}
- Similar to a generator comprehension such as:
-
- .. code-block:: python
-
- (n for n in numbers if not test_odd(n))
-
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: not x, False)
@@ -1187,13 +1019,6 @@ def do_selectattr(*args, **kwargs):
{{ users|selectattr("is_active") }}
{{ users|selectattr("email", "none") }}
- Similar to a generator comprehension such as:
-
- .. code-block:: python
-
- (u for user in users if user.is_active)
- (u for user in users if test_none(user.email))
-
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: x, True)
@@ -1213,13 +1038,6 @@ def do_rejectattr(*args, **kwargs):
{{ users|rejectattr("is_active") }}
{{ users|rejectattr("email", "none") }}
- Similar to a generator comprehension such as:
-
- .. code-block:: python
-
- (u for user in users if not user.is_active)
- (u for user in users if not test_none(user.email))
-
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: not x, True)
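
The four select/reject variants above all funnel into select_or_reject; a minimal sketch of selectattr combined with map (the user dicts are illustrative):

    from jinja2 import Template

    users = [{"name": "ann", "active": True}, {"name": "bob", "active": False}]
    t = Template("{{ users|selectattr('active')|map(attribute='name')|join(', ') }}")
    print(t.render(users=users))  # 'ann'
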
@@ -1252,38 +1070,32 @@ def do_tojson(eval_ctx, value, indent=None):
.. versionadded:: 2.9
"""
policies = eval_ctx.environment.policies
- dumper = policies["json.dumps_function"]
- options = policies["json.dumps_kwargs"]
+ dumper = policies['json.dumps_function']
+ options = policies['json.dumps_kwargs']
if indent is not None:
options = dict(options)
- options["indent"] = indent
+ options['indent'] = indent
return htmlsafe_json_dumps(value, dumper=dumper, **options)
def prepare_map(args, kwargs):
context = args[0]
seq = args[1]
- default = None
- if len(args) == 2 and "attribute" in kwargs:
- attribute = kwargs.pop("attribute")
- default = kwargs.pop("default", None)
+ if len(args) == 2 and 'attribute' in kwargs:
+ attribute = kwargs.pop('attribute')
if kwargs:
- raise FilterArgumentError(
- "Unexpected keyword argument %r" % next(iter(kwargs))
- )
- func = make_attrgetter(context.environment, attribute, default=default)
+ raise FilterArgumentError('Unexpected keyword argument %r' %
+ next(iter(kwargs)))
+ func = make_attrgetter(context.environment, attribute)
else:
try:
name = args[2]
args = args[3:]
except LookupError:
- raise FilterArgumentError("map requires a filter argument")
-
- def func(item):
- return context.environment.call_filter(
- name, item, args, kwargs, context=context
- )
+ raise FilterArgumentError('map requires a filter argument')
+ func = lambda item: context.environment.call_filter(
+ name, item, args, kwargs, context=context)
return seq, func
@@ -1295,22 +1107,18 @@ def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr):
try:
attr = args[2]
except LookupError:
- raise FilterArgumentError("Missing parameter for attribute name")
+ raise FilterArgumentError('Missing parameter for attribute name')
transfunc = make_attrgetter(context.environment, attr)
off = 1
else:
off = 0
-
- def transfunc(x):
- return x
+ transfunc = lambda x: x
try:
name = args[2 + off]
- args = args[3 + off :]
-
- def func(item):
- return context.environment.call_test(name, item, args, kwargs)
-
+ args = args[3 + off:]
+ func = lambda item: context.environment.call_test(
+ name, item, args, kwargs)
except LookupError:
func = bool
@@ -1326,57 +1134,57 @@ def select_or_reject(args, kwargs, modfunc, lookup_attr):
FILTERS = {
- "abs": abs,
- "attr": do_attr,
- "batch": do_batch,
- "capitalize": do_capitalize,
- "center": do_center,
- "count": len,
- "d": do_default,
- "default": do_default,
- "dictsort": do_dictsort,
- "e": escape,
- "escape": escape,
- "filesizeformat": do_filesizeformat,
- "first": do_first,
- "float": do_float,
- "forceescape": do_forceescape,
- "format": do_format,
- "groupby": do_groupby,
- "indent": do_indent,
- "int": do_int,
- "join": do_join,
- "last": do_last,
- "length": len,
- "list": do_list,
- "lower": do_lower,
- "map": do_map,
- "min": do_min,
- "max": do_max,
- "pprint": do_pprint,
- "random": do_random,
- "reject": do_reject,
- "rejectattr": do_rejectattr,
- "replace": do_replace,
- "reverse": do_reverse,
- "round": do_round,
- "safe": do_mark_safe,
- "select": do_select,
- "selectattr": do_selectattr,
- "slice": do_slice,
- "sort": do_sort,
- "string": soft_unicode,
- "striptags": do_striptags,
- "sum": do_sum,
- "title": do_title,
- "trim": do_trim,
- "truncate": do_truncate,
- "unique": do_unique,
- "upper": do_upper,
- "urlencode": do_urlencode,
- "urlize": do_urlize,
- "wordcount": do_wordcount,
- "wordwrap": do_wordwrap,
- "xmlattr": do_xmlattr,
- "tojson": do_tojson,
+ 'abs': abs,
+ 'attr': do_attr,
+ 'batch': do_batch,
+ 'capitalize': do_capitalize,
+ 'center': do_center,
+ 'count': len,
+ 'd': do_default,
+ 'default': do_default,
+ 'dictsort': do_dictsort,
+ 'e': escape,
+ 'escape': escape,
+ 'filesizeformat': do_filesizeformat,
+ 'first': do_first,
+ 'float': do_float,
+ 'forceescape': do_forceescape,
+ 'format': do_format,
+ 'groupby': do_groupby,
+ 'indent': do_indent,
+ 'int': do_int,
+ 'join': do_join,
+ 'last': do_last,
+ 'length': len,
+ 'list': do_list,
+ 'lower': do_lower,
+ 'map': do_map,
+ 'min': do_min,
+ 'max': do_max,
+ 'pprint': do_pprint,
+ 'random': do_random,
+ 'reject': do_reject,
+ 'rejectattr': do_rejectattr,
+ 'replace': do_replace,
+ 'reverse': do_reverse,
+ 'round': do_round,
+ 'safe': do_mark_safe,
+ 'select': do_select,
+ 'selectattr': do_selectattr,
+ 'slice': do_slice,
+ 'sort': do_sort,
+ 'string': soft_unicode,
+ 'striptags': do_striptags,
+ 'sum': do_sum,
+ 'title': do_title,
+ 'trim': do_trim,
+ 'truncate': do_truncate,
+ 'unique': do_unique,
+ 'upper': do_upper,
+ 'urlencode': do_urlencode,
+ 'urlize': do_urlize,
+ 'wordcount': do_wordcount,
+ 'wordwrap': do_wordwrap,
+ 'xmlattr': do_xmlattr,
+ 'tojson': do_tojson,
}
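The FILTERS table is copied into Environment.filters at startup, so the quote-style churn above has no runtime effect. Custom filters register through the same mapping:

    from jinja2 import Environment

    env = Environment()
    env.filters["shout"] = lambda s: s.upper() + "!"  # same mechanism as FILTERS
    print(env.from_string("{{ 'deploy'|shout }}").render())  # DEPLOY!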
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/idtracking.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/idtracking.py
old mode 100644
new mode 100755
index 9a0d8380..491bfe08
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/idtracking.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/idtracking.py
@@ -1,10 +1,11 @@
-from ._compat import iteritems
-from .visitor import NodeVisitor
+from jinja2.visitor import NodeVisitor
+from jinja2._compat import iteritems
-VAR_LOAD_PARAMETER = "param"
-VAR_LOAD_RESOLVE = "resolve"
-VAR_LOAD_ALIAS = "alias"
-VAR_LOAD_UNDEFINED = "undefined"
+
+VAR_LOAD_PARAMETER = 'param'
+VAR_LOAD_RESOLVE = 'resolve'
+VAR_LOAD_ALIAS = 'alias'
+VAR_LOAD_UNDEFINED = 'undefined'
def find_symbols(nodes, parent_symbols=None):
@@ -22,6 +23,7 @@ def symbols_for_node(node, parent_symbols=None):
class Symbols(object):
+
def __init__(self, parent=None, level=None):
if level is None:
if parent is None:
@@ -39,7 +41,7 @@ def analyze_node(self, node, **kwargs):
visitor.visit(node, **kwargs)
def _define_ref(self, name, load=None):
- ident = "l_%d_%s" % (self.level, name)
+ ident = 'l_%d_%s' % (self.level, name)
self.refs[name] = ident
if load is not None:
self.loads[ident] = load
@@ -60,10 +62,8 @@ def find_ref(self, name):
def ref(self, name):
rv = self.find_ref(name)
if rv is None:
- raise AssertionError(
- "Tried to resolve a name to a reference that "
- "was unknown to the frame (%r)" % name
- )
+ raise AssertionError('Tried to resolve a name to a reference that '
+ 'was unknown to the frame (%r)' % name)
return rv
def copy(self):
@@ -118,7 +118,7 @@ def branch_update(self, branch_symbols):
if branch_count == len(branch_symbols):
continue
target = self.find_ref(name)
- assert target is not None, "should not happen"
+ assert target is not None, 'should not happen'
if self.parent is not None:
outer_target = self.parent.find_ref(name)
@@ -149,6 +149,7 @@ def dump_param_targets(self):
class RootVisitor(NodeVisitor):
+
def __init__(self, symbols):
self.sym_visitor = FrameSymbolVisitor(symbols)
@@ -156,39 +157,35 @@ def _simple_visit(self, node, **kwargs):
for child in node.iter_child_nodes():
self.sym_visitor.visit(child)
- visit_Template = (
- visit_Block
- ) = (
- visit_Macro
- ) = (
- visit_FilterBlock
- ) = visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit
+ visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \
+ visit_Scope = visit_If = visit_ScopedEvalContextModifier = \
+ _simple_visit
def visit_AssignBlock(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_CallBlock(self, node, **kwargs):
- for child in node.iter_child_nodes(exclude=("call",)):
+ for child in node.iter_child_nodes(exclude=('call',)):
self.sym_visitor.visit(child)
def visit_OverlayScope(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
- def visit_For(self, node, for_branch="body", **kwargs):
- if for_branch == "body":
+ def visit_For(self, node, for_branch='body', **kwargs):
+ if for_branch == 'body':
self.sym_visitor.visit(node.target, store_as_param=True)
branch = node.body
- elif for_branch == "else":
+ elif for_branch == 'else':
branch = node.else_
- elif for_branch == "test":
+ elif for_branch == 'test':
self.sym_visitor.visit(node.target, store_as_param=True)
if node.test is not None:
self.sym_visitor.visit(node.test)
return
else:
- raise RuntimeError("Unknown for branch")
+ raise RuntimeError('Unknown for branch')
for item in branch or ():
self.sym_visitor.visit(item)
@@ -199,9 +196,8 @@ def visit_With(self, node, **kwargs):
self.sym_visitor.visit(child)
def generic_visit(self, node, *args, **kwargs):
- raise NotImplementedError(
- "Cannot find symbols for %r" % node.__class__.__name__
- )
+ raise NotImplementedError('Cannot find symbols for %r' %
+ node.__class__.__name__)
class FrameSymbolVisitor(NodeVisitor):
@@ -212,11 +208,11 @@ def __init__(self, symbols):
def visit_Name(self, node, store_as_param=False, **kwargs):
"""All assignments to names go through this function."""
- if store_as_param or node.ctx == "param":
+ if store_as_param or node.ctx == 'param':
self.symbols.declare_parameter(node.name)
- elif node.ctx == "store":
+ elif node.ctx == 'store':
self.symbols.store(node.name)
- elif node.ctx == "load":
+ elif node.ctx == 'load':
self.symbols.load(node.name)
def visit_NSRef(self, node, **kwargs):
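Before moving on to lexer.py: the idtracking changes are cosmetic. For orientation, a sketch of what the symbol pass computes; note this is internal API, so the exact shapes of `refs` and `loads` are version-dependent:

    from jinja2 import Environment
    from jinja2.idtracking import symbols_for_node

    env = Environment()
    ast = env.parse("{% set x = 1 %}{{ x }}{{ y }}")
    sym = symbols_for_node(ast)
    print(sym.refs)   # name -> frame-local id, e.g. {'x': 'l_0_x', 'y': 'l_0_y'}
    print(sym.loads)  # frame-local id -> (VAR_LOAD_* instruction, parameter)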
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/lexer.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/lexer.py
old mode 100644
new mode 100755
index 552356a1..6fd135dd
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/lexer.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/lexer.py
@@ -1,194 +1,185 @@
# -*- coding: utf-8 -*-
-"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
-is used to do some preprocessing. It filters out invalid operators like
-the bitshift operators we don't allow in templates. It separates
-template code and python code in expressions.
+"""
+ jinja2.lexer
+ ~~~~~~~~~~~~
+
+ This module implements a Jinja / Python combination lexer. The
+ `Lexer` class provided by this module is used to do some preprocessing
+ for Jinja.
+
+ On the one hand it filters out invalid operators like the bitshift
+ operators we don't allow in templates. On the other hand it separates
+ template code and python code in expressions.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
"""
import re
-from ast import literal_eval
from collections import deque
from operator import itemgetter
-from ._compat import implements_iterator
-from ._compat import intern
-from ._compat import iteritems
-from ._compat import text_type
-from .exceptions import TemplateSyntaxError
-from .utils import LRUCache
+from jinja2._compat import implements_iterator, intern, iteritems, text_type
+from jinja2.exceptions import TemplateSyntaxError
+from jinja2.utils import LRUCache
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)
# static regular expressions
-whitespace_re = re.compile(r"\s+", re.U)
-newline_re = re.compile(r"(\r\n|\r|\n)")
-string_re = re.compile(
- r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
-)
-integer_re = re.compile(r"(\d+_)*\d+")
-float_re = re.compile(
-    r"""
-    (?<!\.)  # doesn't start with a .
-    (\d+_)*\d+  # digits, possibly _ separated
-    (
-        (\.(\d+_)*\d+)?  # decimal part
-        e[+\-]?(\d+_)*\d+  # exponent part
-    |
-        \.(\d+_)*\d+  # decimal part
-    )
-    """,
-    re.IGNORECASE | re.VERBOSE,
-)
+whitespace_re = re.compile(r'\s+', re.U)
+string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
+                       r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
+integer_re = re.compile(r'\d+')

try:
    # check if the compiler supports unicode identifiers
-    compile("föö", "<unknown>", "eval")
+    compile('föö', '<unknown>', 'eval')
except SyntaxError:
- # Python 2, no Unicode support, use ASCII identifiers
- name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
+ # no Unicode support, use ASCII identifiers
+ name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
check_ident = False
else:
- # Unicode support, import generated re pattern and set flag to use
- # str.isidentifier to validate during lexing.
- from ._identifier import pattern as name_re
-
+ # Unicode support, build a pattern to match valid characters, and set flag
+ # to use str.isidentifier to validate during lexing
+ from jinja2 import _identifier
+ name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern))
check_ident = True
+ # remove the pattern from memory after building the regex
+ import sys
+ del sys.modules['jinja2._identifier']
+ import jinja2
+ del jinja2._identifier
+ del _identifier
+
+float_re = re.compile(r'(?<!\.)\d+\.\d+')
+newline_re = re.compile(r'(\r\n|\r|\n)')

operators = {
-    "+": TOKEN_ADD,
-    "-": TOKEN_SUB,
-    "/": TOKEN_DIV,
-    "//": TOKEN_FLOORDIV,
-    "*": TOKEN_MUL,
-    "%": TOKEN_MOD,
-    "**": TOKEN_POW,
-    "~": TOKEN_TILDE,
-    "[": TOKEN_LBRACKET,
-    "]": TOKEN_RBRACKET,
-    "(": TOKEN_LPAREN,
-    ")": TOKEN_RPAREN,
-    "{": TOKEN_LBRACE,
-    "}": TOKEN_RBRACE,
-    "==": TOKEN_EQ,
-    "!=": TOKEN_NE,
-    ">": TOKEN_GT,
- ">=": TOKEN_GTEQ,
- "<": TOKEN_LT,
- "<=": TOKEN_LTEQ,
- "=": TOKEN_ASSIGN,
- ".": TOKEN_DOT,
- ":": TOKEN_COLON,
- "|": TOKEN_PIPE,
- ",": TOKEN_COMMA,
- ";": TOKEN_SEMICOLON,
+ '+': TOKEN_ADD,
+ '-': TOKEN_SUB,
+ '/': TOKEN_DIV,
+ '//': TOKEN_FLOORDIV,
+ '*': TOKEN_MUL,
+ '%': TOKEN_MOD,
+ '**': TOKEN_POW,
+ '~': TOKEN_TILDE,
+ '[': TOKEN_LBRACKET,
+ ']': TOKEN_RBRACKET,
+ '(': TOKEN_LPAREN,
+ ')': TOKEN_RPAREN,
+ '{': TOKEN_LBRACE,
+ '}': TOKEN_RBRACE,
+ '==': TOKEN_EQ,
+ '!=': TOKEN_NE,
+ '>': TOKEN_GT,
+ '>=': TOKEN_GTEQ,
+ '<': TOKEN_LT,
+ '<=': TOKEN_LTEQ,
+ '=': TOKEN_ASSIGN,
+ '.': TOKEN_DOT,
+ ':': TOKEN_COLON,
+ '|': TOKEN_PIPE,
+ ',': TOKEN_COMMA,
+ ';': TOKEN_SEMICOLON
}
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
-assert len(operators) == len(reverse_operators), "operators dropped"
-operator_re = re.compile(
- "(%s)" % "|".join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))
-)
-
-ignored_tokens = frozenset(
- [
- TOKEN_COMMENT_BEGIN,
- TOKEN_COMMENT,
- TOKEN_COMMENT_END,
- TOKEN_WHITESPACE,
- TOKEN_LINECOMMENT_BEGIN,
- TOKEN_LINECOMMENT_END,
- TOKEN_LINECOMMENT,
- ]
-)
-ignore_if_empty = frozenset(
- [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
-)
+assert len(operators) == len(reverse_operators), 'operators dropped'
+operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
+ sorted(operators, key=lambda x: -len(x))))
+
+ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
+ TOKEN_COMMENT_END, TOKEN_WHITESPACE,
+ TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
+ TOKEN_LINECOMMENT])
+ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
+ TOKEN_COMMENT, TOKEN_LINECOMMENT])
def _describe_token_type(token_type):
if token_type in reverse_operators:
return reverse_operators[token_type]
return {
- TOKEN_COMMENT_BEGIN: "begin of comment",
- TOKEN_COMMENT_END: "end of comment",
- TOKEN_COMMENT: "comment",
- TOKEN_LINECOMMENT: "comment",
- TOKEN_BLOCK_BEGIN: "begin of statement block",
- TOKEN_BLOCK_END: "end of statement block",
- TOKEN_VARIABLE_BEGIN: "begin of print statement",
- TOKEN_VARIABLE_END: "end of print statement",
- TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
- TOKEN_LINESTATEMENT_END: "end of line statement",
- TOKEN_DATA: "template data / text",
- TOKEN_EOF: "end of template",
+ TOKEN_COMMENT_BEGIN: 'begin of comment',
+ TOKEN_COMMENT_END: 'end of comment',
+ TOKEN_COMMENT: 'comment',
+ TOKEN_LINECOMMENT: 'comment',
+ TOKEN_BLOCK_BEGIN: 'begin of statement block',
+ TOKEN_BLOCK_END: 'end of statement block',
+ TOKEN_VARIABLE_BEGIN: 'begin of print statement',
+ TOKEN_VARIABLE_END: 'end of print statement',
+ TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
+ TOKEN_LINESTATEMENT_END: 'end of line statement',
+ TOKEN_DATA: 'template data / text',
+ TOKEN_EOF: 'end of template'
}.get(token_type, token_type)
def describe_token(token):
"""Returns a description of the token."""
- if token.type == TOKEN_NAME:
+ if token.type == 'name':
return token.value
return _describe_token_type(token.type)
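A quick way to see the stream these token tables describe is Environment.lex, which drives tokeniter directly (whitespace tokens included, since wrap() is not applied):

    from jinja2 import Environment

    env = Environment()
    for lineno, token, value in env.lex("Hello {{ name }}!"):
        print(lineno, token, repr(value))
    # e.g. (1, 'data', 'Hello '), (1, 'variable_begin', '{{'),
    #      (1, 'whitespace', ' '), (1, 'name', 'name'), ..., (1, 'data', '!')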
def describe_token_expr(expr):
"""Like `describe_token` but for token expressions."""
- if ":" in expr:
- type, value = expr.split(":", 1)
- if type == TOKEN_NAME:
+ if ':' in expr:
+ type, value = expr.split(':', 1)
+ if type == 'name':
return value
else:
type = expr
@@ -206,39 +197,21 @@ def compile_rules(environment):
"""Compiles all the rules from the environment into a list of rules."""
e = re.escape
rules = [
- (
- len(environment.comment_start_string),
- TOKEN_COMMENT_BEGIN,
- e(environment.comment_start_string),
- ),
- (
- len(environment.block_start_string),
- TOKEN_BLOCK_BEGIN,
- e(environment.block_start_string),
- ),
- (
- len(environment.variable_start_string),
- TOKEN_VARIABLE_BEGIN,
- e(environment.variable_start_string),
- ),
+ (len(environment.comment_start_string), 'comment',
+ e(environment.comment_start_string)),
+ (len(environment.block_start_string), 'block',
+ e(environment.block_start_string)),
+ (len(environment.variable_start_string), 'variable',
+ e(environment.variable_start_string))
]
if environment.line_statement_prefix is not None:
- rules.append(
- (
- len(environment.line_statement_prefix),
- TOKEN_LINESTATEMENT_BEGIN,
- r"^[ \t\v]*" + e(environment.line_statement_prefix),
- )
- )
+ rules.append((len(environment.line_statement_prefix), 'linestatement',
+ r'^[ \t\v]*' + e(environment.line_statement_prefix)))
if environment.line_comment_prefix is not None:
- rules.append(
- (
- len(environment.line_comment_prefix),
- TOKEN_LINECOMMENT_BEGIN,
- r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
- )
- )
+ rules.append((len(environment.line_comment_prefix), 'linecomment',
+ r'(?:^|(?<=\S))[^\S\r\n]*' +
+ e(environment.line_comment_prefix)))
return [x[1:] for x in sorted(rules, reverse=True)]
@@ -258,7 +231,6 @@ def __call__(self, lineno, filename):
class Token(tuple):
"""Token class."""
-
__slots__ = ()
lineno, type, value = (property(itemgetter(x)) for x in range(3))
@@ -268,7 +240,7 @@ def __new__(cls, lineno, type, value):
def __str__(self):
if self.type in reverse_operators:
return reverse_operators[self.type]
- elif self.type == "name":
+ elif self.type == 'name':
return self.value
return self.type
@@ -281,8 +253,8 @@ def test(self, expr):
# passed an iterable of not interned strings.
if self.type == expr:
return True
- elif ":" in expr:
- return expr.split(":", 1) == [self.type, self.value]
+ elif ':' in expr:
+ return expr.split(':', 1) == [self.type, self.value]
return False
def test_any(self, *iterable):
@@ -293,7 +265,11 @@ def test_any(self, *iterable):
return False
def __repr__(self):
- return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value)
+ return 'Token(%r, %r, %r)' % (
+ self.lineno,
+ self.type,
+ self.value
+ )
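Token.test/test_any above match either a bare type or a "type:value" expression; the core comparison is just this (standalone sketch, not the Jinja class):

    def token_matches(tok_type, tok_value, expr):
        # mirrors Token.test: "name:endfor" matches type and value,
        # a bare "name" matches the type alone
        if ":" in expr:
            return expr.split(":", 1) == [tok_type, tok_value]
        return expr == tok_type

    print(token_matches("name", "endfor", "name:endfor"))  # True
    print(token_matches("name", "endif", "name:endfor"))   # False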
@implements_iterator
@@ -330,7 +306,7 @@ def __init__(self, generator, name, filename):
self.name = name
self.filename = filename
self.closed = False
- self.current = Token(1, TOKEN_INITIAL, "")
+ self.current = Token(1, TOKEN_INITIAL, '')
next(self)
def __iter__(self):
@@ -338,13 +314,9 @@ def __iter__(self):
def __bool__(self):
return bool(self._pushed) or self.current.type is not TOKEN_EOF
-
__nonzero__ = __bool__ # py2
- @property
- def eos(self):
- """Are we at the end of the stream?"""
- return not self
+ eos = property(lambda x: not x, doc="Are we at the end of the stream?")
def push(self, token):
"""Push a token back to the stream."""
@@ -360,7 +332,7 @@ def look(self):
def skip(self, n=1):
"""Got n tokens ahead."""
- for _ in range(n):
+ for x in range(n):
next(self)
def next_if(self, expr):
@@ -391,7 +363,7 @@ def __next__(self):
def close(self):
"""Close the stream."""
- self.current = Token(self.current.lineno, TOKEN_EOF, "")
+ self.current = Token(self.current.lineno, TOKEN_EOF, '')
self._iter = None
self.closed = True
@@ -402,18 +374,14 @@ def expect(self, expr):
if not self.current.test(expr):
expr = describe_token_expr(expr)
if self.current.type is TOKEN_EOF:
- raise TemplateSyntaxError(
- "unexpected end of template, expected %r." % expr,
- self.current.lineno,
- self.name,
- self.filename,
- )
- raise TemplateSyntaxError(
- "expected token %r, got %r" % (expr, describe_token(self.current)),
- self.current.lineno,
- self.name,
- self.filename,
- )
+ raise TemplateSyntaxError('unexpected end of template, '
+ 'expected %r.' % expr,
+ self.current.lineno,
+ self.name, self.filename)
+ raise TemplateSyntaxError("expected token %r, got %r" %
+ (expr, describe_token(self.current)),
+ self.current.lineno,
+ self.name, self.filename)
try:
return self.current
finally:
@@ -422,20 +390,18 @@ def expect(self, expr):
def get_lexer(environment):
"""Return a lexer which is probably cached."""
- key = (
- environment.block_start_string,
- environment.block_end_string,
- environment.variable_start_string,
- environment.variable_end_string,
- environment.comment_start_string,
- environment.comment_end_string,
- environment.line_statement_prefix,
- environment.line_comment_prefix,
- environment.trim_blocks,
- environment.lstrip_blocks,
- environment.newline_sequence,
- environment.keep_trailing_newline,
- )
+ key = (environment.block_start_string,
+ environment.block_end_string,
+ environment.variable_start_string,
+ environment.variable_end_string,
+ environment.comment_start_string,
+ environment.comment_end_string,
+ environment.line_statement_prefix,
+ environment.line_comment_prefix,
+ environment.trim_blocks,
+ environment.lstrip_blocks,
+ environment.newline_sequence,
+ environment.keep_trailing_newline)
lexer = _lexer_cache.get(key)
if lexer is None:
lexer = Lexer(environment)
@@ -443,19 +409,6 @@ def get_lexer(environment):
return lexer
-class OptionalLStrip(tuple):
- """A special tuple for marking a point in the state that can have
- lstrip applied.
- """
-
- __slots__ = ()
-
- # Even though it looks like a no-op, creating instances fails
- # without this.
- def __new__(cls, *members, **kwargs):
- return super(OptionalLStrip, cls).__new__(cls, members)
-
-
class Lexer(object):
"""Class that implements a lexer for a given environment. Automatically
created by the environment class, usually you don't have to do that.
@@ -466,11 +419,9 @@ class Lexer(object):
def __init__(self, environment):
# shortcuts
+ c = lambda x: re.compile(x, re.M | re.S)
e = re.escape
- def c(x):
- return re.compile(x, re.M | re.S)
-
# lexing rules for tags
tag_rules = [
(whitespace_re, TOKEN_WHITESPACE, None),
@@ -478,7 +429,7 @@ def c(x):
(integer_re, TOKEN_INTEGER, None),
(name_re, TOKEN_NAME, None),
(string_re, TOKEN_STRING, None),
- (operator_re, TOKEN_OPERATOR, None),
+ (operator_re, TOKEN_OPERATOR, None)
]
# assemble the root lexing rule. because "|" is ungreedy
@@ -490,120 +441,108 @@ def c(x):
root_tag_rules = compile_rules(environment)
# block suffix if trimming is enabled
- block_suffix_re = environment.trim_blocks and "\\n?" or ""
-
- # If lstrip is enabled, it should not be applied if there is any
- # non-whitespace between the newline and block.
- self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None
+ block_suffix_re = environment.trim_blocks and '\\n?' or ''
+
+ # strip leading spaces if lstrip_blocks is enabled
+ prefix_re = {}
+ if environment.lstrip_blocks:
+ # use '{%+' to manually disable lstrip_blocks behavior
+ no_lstrip_re = e('+')
+ # detect overlap between block and variable or comment strings
+ block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
+ # make sure we don't mistake a block for a variable or a comment
+ m = block_diff.match(environment.comment_start_string)
+ no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
+ m = block_diff.match(environment.variable_start_string)
+ no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
+
+ # detect overlap between comment and variable strings
+ comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
+ m = comment_diff.match(environment.variable_start_string)
+ no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''
+
+ lstrip_re = r'^[ \t]*'
+ block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
+ lstrip_re,
+ e(environment.block_start_string),
+ no_lstrip_re,
+ e(environment.block_start_string),
+ )
+ comment_prefix_re = r'%s%s%s|%s\+?' % (
+ lstrip_re,
+ e(environment.comment_start_string),
+ no_variable_re,
+ e(environment.comment_start_string),
+ )
+ prefix_re['block'] = block_prefix_re
+ prefix_re['comment'] = comment_prefix_re
+ else:
+ block_prefix_re = '%s' % e(environment.block_start_string)
self.newline_sequence = environment.newline_sequence
self.keep_trailing_newline = environment.keep_trailing_newline
# global lexing rules
self.rules = {
- "root": [
+ 'root': [
# directives
- (
- c(
- "(.*?)(?:%s)"
- % "|".join(
- [
-                            r"(?P<raw_begin>%s(\-|\+|)\s*raw\s*(?:\-%s\s*|%s))"
- % (
- e(environment.block_start_string),
- e(environment.block_end_string),
- e(environment.block_end_string),
- )
- ]
- + [
- r"(?P<%s>%s(\-|\+|))" % (n, r)
- for n, r in root_tag_rules
- ]
- )
- ),
- OptionalLStrip(TOKEN_DATA, "#bygroup"),
- "#bygroup",
- ),
+ (c('(.*?)(?:%s)' % '|'.join(
+            [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
+ e(environment.block_start_string),
+ block_prefix_re,
+ e(environment.block_end_string),
+ e(environment.block_end_string)
+ )] + [
+ r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
+ for n, r in root_tag_rules
+ ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
# data
- (c(".+"), TOKEN_DATA, None),
+ (c('.+'), TOKEN_DATA, None)
],
# comments
TOKEN_COMMENT_BEGIN: [
- (
- c(
- r"(.*?)((?:\-%s\s*|%s)%s)"
- % (
- e(environment.comment_end_string),
- e(environment.comment_end_string),
- block_suffix_re,
- )
- ),
- (TOKEN_COMMENT, TOKEN_COMMENT_END),
- "#pop",
- ),
- (c("(.)"), (Failure("Missing end of comment tag"),), None),
+ (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
+ e(environment.comment_end_string),
+ e(environment.comment_end_string),
+ block_suffix_re
+ )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
+ (c('(.)'), (Failure('Missing end of comment tag'),), None)
],
# blocks
TOKEN_BLOCK_BEGIN: [
- (
- c(
- r"(?:\-%s\s*|%s)%s"
- % (
- e(environment.block_end_string),
- e(environment.block_end_string),
- block_suffix_re,
- )
- ),
- TOKEN_BLOCK_END,
- "#pop",
- ),
- ]
- + tag_rules,
+ (c(r'(?:\-%s\s*|%s)%s' % (
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re
+ )), TOKEN_BLOCK_END, '#pop'),
+ ] + tag_rules,
# variables
TOKEN_VARIABLE_BEGIN: [
- (
- c(
- r"\-%s\s*|%s"
- % (
- e(environment.variable_end_string),
- e(environment.variable_end_string),
- )
- ),
- TOKEN_VARIABLE_END,
- "#pop",
- )
- ]
- + tag_rules,
+ (c(r'\-%s\s*|%s' % (
+ e(environment.variable_end_string),
+ e(environment.variable_end_string)
+ )), TOKEN_VARIABLE_END, '#pop')
+ ] + tag_rules,
# raw block
TOKEN_RAW_BEGIN: [
- (
- c(
- r"(.*?)((?:%s(\-|\+|))\s*endraw\s*(?:\-%s\s*|%s%s))"
- % (
- e(environment.block_start_string),
- e(environment.block_end_string),
- e(environment.block_end_string),
- block_suffix_re,
- )
- ),
- OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END),
- "#pop",
- ),
- (c("(.)"), (Failure("Missing end of raw directive"),), None),
+ (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
+ e(environment.block_start_string),
+ block_prefix_re,
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re
+ )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
+ (c('(.)'), (Failure('Missing end of raw directive'),), None)
],
# line statements
TOKEN_LINESTATEMENT_BEGIN: [
- (c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
- ]
- + tag_rules,
+ (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
+ ] + tag_rules,
# line comments
TOKEN_LINECOMMENT_BEGIN: [
- (
- c(r"(.*?)()(?=\n|$)"),
- (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
- "#pop",
- )
- ],
+ (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
+ TOKEN_LINECOMMENT_END), '#pop')
+ ]
}
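The prefix_re machinery added above is how the 2.10 lexer implements lstrip_blocks at the regex level (2.11 handles it later, in tokeniter, via lstrip_unless_re). The observable behavior is the same either way; a minimal sketch:

    from jinja2 import Environment

    env = Environment(trim_blocks=True, lstrip_blocks=True)
    src = "  {% if show %}\nhi\n  {% endif %}\n"
    print(repr(env.from_string(src).render(show=True)))
    # 'hi\n' -- tag-only lines vanish entirely
    print(repr(Environment().from_string(src).render(show=True)))
    # '  \nhi\n  \n' -- without the options, tag whitespace is preserved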
def _normalize_newlines(self, value):
@@ -611,7 +550,8 @@ def _normalize_newlines(self, value):
return newline_re.sub(self.newline_sequence, value)
def tokenize(self, source, name=None, filename=None, state=None):
- """Calls tokeniter + tokenize and wraps it in a token stream."""
+ """Calls tokeniter + tokenize and wraps it in a token stream.
+ """
stream = self.tokeniter(source, name, filename, state)
return TokenStream(self.wrap(stream, name, filename), name, filename)
@@ -622,40 +562,37 @@ def wrap(self, stream, name=None, filename=None):
for lineno, token, value in stream:
if token in ignored_tokens:
continue
- elif token == TOKEN_LINESTATEMENT_BEGIN:
- token = TOKEN_BLOCK_BEGIN
- elif token == TOKEN_LINESTATEMENT_END:
- token = TOKEN_BLOCK_END
+ elif token == 'linestatement_begin':
+ token = 'block_begin'
+ elif token == 'linestatement_end':
+ token = 'block_end'
# we are not interested in those tokens in the parser
- elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
+ elif token in ('raw_begin', 'raw_end'):
continue
- elif token == TOKEN_DATA:
+ elif token == 'data':
value = self._normalize_newlines(value)
- elif token == "keyword":
+ elif token == 'keyword':
token = value
- elif token == TOKEN_NAME:
+ elif token == 'name':
value = str(value)
if check_ident and not value.isidentifier():
raise TemplateSyntaxError(
- "Invalid character in identifier", lineno, name, filename
- )
- elif token == TOKEN_STRING:
+ 'Invalid character in identifier',
+ lineno, name, filename)
+ elif token == 'string':
# try to unescape string
try:
- value = (
- self._normalize_newlines(value[1:-1])
- .encode("ascii", "backslashreplace")
- .decode("unicode-escape")
- )
+ value = self._normalize_newlines(value[1:-1]) \
+ .encode('ascii', 'backslashreplace') \
+ .decode('unicode-escape')
except Exception as e:
- msg = str(e).split(":")[-1].strip()
+ msg = str(e).split(':')[-1].strip()
raise TemplateSyntaxError(msg, lineno, name, filename)
- elif token == TOKEN_INTEGER:
- value = int(value.replace("_", ""))
- elif token == TOKEN_FLOAT:
- # remove all "_" first to support more Python versions
- value = literal_eval(value.replace("_", ""))
- elif token == TOKEN_OPERATOR:
+ elif token == 'integer':
+ value = int(value)
+ elif token == 'float':
+ value = float(value)
+ elif token == 'operator':
token = operators[value]
yield Token(lineno, token, value)
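This wrap() hunk is one of the few behavioral changes in the file: 2.11 strips underscores and uses literal_eval for floats, which the patch reverts to plain int()/float(). A sketch of the delta (runs as shown under 2.11 only):

    from jinja2 import Environment

    env = Environment()
    print(env.from_string("{{ 1_000 + 1 }}").render())  # 2.11: '1001'
    print(env.from_string("{{ 2.5e2 }}").render())      # 2.11: '250.0'
    # with the downgraded lexer, 1_000 and 2.5e2 no longer lex as single
    # number tokens, so both templates raise TemplateSyntaxError instead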
@@ -666,23 +603,23 @@ def tokeniter(self, source, name, filename=None, state=None):
source = text_type(source)
lines = source.splitlines()
if self.keep_trailing_newline and source:
- for newline in ("\r\n", "\r", "\n"):
+ for newline in ('\r\n', '\r', '\n'):
if source.endswith(newline):
- lines.append("")
+ lines.append('')
break
- source = "\n".join(lines)
+ source = '\n'.join(lines)
pos = 0
lineno = 1
- stack = ["root"]
- if state is not None and state != "root":
- assert state in ("variable", "block"), "invalid state"
- stack.append(state + "_begin")
+ stack = ['root']
+ if state is not None and state != 'root':
+ assert state in ('variable', 'block'), 'invalid state'
+ stack.append(state + '_begin')
+ else:
+ state = 'root'
statetokens = self.rules[stack[-1]]
source_length = len(source)
+
balancing_stack = []
- lstrip_unless_re = self.lstrip_unless_re
- newlines_stripped = 0
- line_starting = True
while 1:
# tokenizer loop
@@ -696,48 +633,13 @@ def tokeniter(self, source, name, filename=None, state=None):
# are balanced. continue parsing with the lower rule which
# is the operator rule. do this only if the end tags look
# like operators
- if balancing_stack and tokens in (
- TOKEN_VARIABLE_END,
- TOKEN_BLOCK_END,
- TOKEN_LINESTATEMENT_END,
- ):
+ if balancing_stack and \
+ tokens in ('variable_end', 'block_end',
+ 'linestatement_end'):
continue
# tuples support more options
if isinstance(tokens, tuple):
- groups = m.groups()
-
- if isinstance(tokens, OptionalLStrip):
- # Rule supports lstrip. Match will look like
- # text, block type, whitespace control, type, control, ...
- text = groups[0]
-
- # Skipping the text and first type, every other group is the
- # whitespace control for each type. One of the groups will be
- # -, +, or empty string instead of None.
- strip_sign = next(g for g in groups[2::2] if g is not None)
-
- if strip_sign == "-":
- # Strip all whitespace between the text and the tag.
- stripped = text.rstrip()
- newlines_stripped = text[len(stripped) :].count("\n")
- groups = (stripped,) + groups[1:]
- elif (
- # Not marked for preserving whitespace.
- strip_sign != "+"
- # lstrip is enabled.
- and lstrip_unless_re is not None
- # Not a variable expression.
- and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
- ):
- # The start of text between the last newline and the tag.
- l_pos = text.rfind("\n") + 1
- if l_pos > 0 or line_starting:
- # If there's only whitespace between the newline and the
- # tag, strip it.
- if not lstrip_unless_re.search(text, l_pos):
- groups = (text[:l_pos],) + groups[1:]
-
for idx, token in enumerate(tokens):
# failure group
if token.__class__ is Failure:
@@ -745,57 +647,51 @@ def tokeniter(self, source, name, filename=None, state=None):
# bygroup is a bit more complex, in that case we
# yield for the current token the first named
# group that matched
- elif token == "#bygroup":
+ elif token == '#bygroup':
for key, value in iteritems(m.groupdict()):
if value is not None:
yield lineno, key, value
- lineno += value.count("\n")
+ lineno += value.count('\n')
break
else:
- raise RuntimeError(
- "%r wanted to resolve "
- "the token dynamically"
- " but no group matched" % regex
- )
+ raise RuntimeError('%r wanted to resolve '
+ 'the token dynamically'
+ ' but no group matched'
+ % regex)
# normal group
else:
- data = groups[idx]
+ data = m.group(idx + 1)
if data or token not in ignore_if_empty:
yield lineno, token, data
- lineno += data.count("\n") + newlines_stripped
- newlines_stripped = 0
+ lineno += data.count('\n')
# strings as token just are yielded as it.
else:
data = m.group()
# update brace/parentheses balance
- if tokens == TOKEN_OPERATOR:
- if data == "{":
- balancing_stack.append("}")
- elif data == "(":
- balancing_stack.append(")")
- elif data == "[":
- balancing_stack.append("]")
- elif data in ("}", ")", "]"):
+ if tokens == 'operator':
+ if data == '{':
+ balancing_stack.append('}')
+ elif data == '(':
+ balancing_stack.append(')')
+ elif data == '[':
+ balancing_stack.append(']')
+ elif data in ('}', ')', ']'):
if not balancing_stack:
- raise TemplateSyntaxError(
- "unexpected '%s'" % data, lineno, name, filename
- )
+ raise TemplateSyntaxError('unexpected \'%s\'' %
+ data, lineno, name,
+ filename)
expected_op = balancing_stack.pop()
if expected_op != data:
- raise TemplateSyntaxError(
- "unexpected '%s', "
- "expected '%s'" % (data, expected_op),
- lineno,
- name,
- filename,
- )
+ raise TemplateSyntaxError('unexpected \'%s\', '
+ 'expected \'%s\'' %
+ (data, expected_op),
+ lineno, name,
+ filename)
# yield items
if data or tokens not in ignore_if_empty:
yield lineno, tokens, data
- lineno += data.count("\n")
-
- line_starting = m.group()[-1:] == "\n"
+ lineno += data.count('\n')
# fetch new position into new variable so that we can check
# if there is a internal parsing error which would result
@@ -805,20 +701,19 @@ def tokeniter(self, source, name, filename=None, state=None):
# handle state changes
if new_state is not None:
# remove the uppermost state
- if new_state == "#pop":
+ if new_state == '#pop':
stack.pop()
# resolve the new state by group checking
- elif new_state == "#bygroup":
+ elif new_state == '#bygroup':
for key, value in iteritems(m.groupdict()):
if value is not None:
stack.append(key)
break
else:
- raise RuntimeError(
- "%r wanted to resolve the "
- "new state dynamically but"
- " no group matched" % regex
- )
+ raise RuntimeError('%r wanted to resolve the '
+ 'new state dynamically but'
+ ' no group matched' %
+ regex)
# direct state name given
else:
stack.append(new_state)
@@ -827,9 +722,8 @@ def tokeniter(self, source, name, filename=None, state=None):
# this means a loop without break condition, avoid that and
# raise error
elif pos2 == pos:
- raise RuntimeError(
- "%r yielded empty string without stack change" % regex
- )
+ raise RuntimeError('%r yielded empty string without '
+ 'stack change' % regex)
# publish new function and start again
pos = pos2
break
@@ -840,9 +734,6 @@ def tokeniter(self, source, name, filename=None, state=None):
if pos >= source_length:
return
# something went wrong
- raise TemplateSyntaxError(
- "unexpected char %r at %d" % (source[pos], pos),
- lineno,
- name,
- filename,
- )
+ raise TemplateSyntaxError('unexpected char %r at %d' %
+ (source[pos], pos), lineno,
+ name, filename)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/loaders.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/loaders.py
old mode 100644
new mode 100755
index 457c4b59..4c797937
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/loaders.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/loaders.py
@@ -1,21 +1,22 @@
# -*- coding: utf-8 -*-
-"""API and implementations for loading templates from different data
-sources.
+"""
+ jinja2.loaders
+ ~~~~~~~~~~~~~~
+
+ Jinja loader classes.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
-from hashlib import sha1
-from os import path
from types import ModuleType
-
-from ._compat import abc
-from ._compat import fspath
-from ._compat import iteritems
-from ._compat import string_types
-from .exceptions import TemplateNotFound
-from .utils import internalcode
-from .utils import open_if_exists
+from os import path
+from hashlib import sha1
+from jinja2.exceptions import TemplateNotFound
+from jinja2.utils import open_if_exists, internalcode
+from jinja2._compat import string_types, iteritems
def split_template_path(template):
@@ -23,14 +24,12 @@ def split_template_path(template):
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
- for piece in template.split("/"):
- if (
- path.sep in piece
- or (path.altsep and path.altsep in piece)
- or piece == path.pardir
- ):
+ for piece in template.split('/'):
+ if path.sep in piece \
+ or (path.altsep and path.altsep in piece) or \
+ piece == path.pardir:
raise TemplateNotFound(template)
- elif piece and piece != ".":
+ elif piece and piece != '.':
pieces.append(piece)
return pieces
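The reflowed condition above is pure style; the traversal guard is unchanged. Its contract, as a sketch:

    from jinja2.exceptions import TemplateNotFound
    from jinja2.loaders import split_template_path

    print(split_template_path("emails/./welcome.html"))  # ['emails', 'welcome.html']
    try:
        split_template_path("../secrets.txt")  # pardir is rejected, not normalized
    except TemplateNotFound as exc:
        print("rejected:", exc.name)           # rejected: ../secrets.txt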
@@ -87,16 +86,15 @@ def get_source(self, environment, template):
the template will be reloaded.
"""
if not self.has_source_access:
- raise RuntimeError(
- "%s cannot provide access to the source" % self.__class__.__name__
- )
+ raise RuntimeError('%s cannot provide access to the source' %
+ self.__class__.__name__)
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
- raise TypeError("this loader cannot iterate over all templates")
+ raise TypeError('this loader cannot iterate over all templates')
@internalcode
def load(self, environment, name, globals=None):
@@ -133,9 +131,8 @@ def load(self, environment, name, globals=None):
bucket.code = code
bcc.set_bucket(bucket)
- return environment.template_class.from_code(
- environment, code, globals, uptodate
- )
+ return environment.template_class.from_code(environment, code,
+ globals, uptodate)
class FileSystemLoader(BaseLoader):
@@ -156,20 +153,14 @@ class FileSystemLoader(BaseLoader):
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
- .. versionchanged:: 2.8
- The ``followlinks`` parameter was added.
+ .. versionchanged:: 2.8+
+ The *followlinks* parameter was added.
"""
- def __init__(self, searchpath, encoding="utf-8", followlinks=False):
- if not isinstance(searchpath, abc.Iterable) or isinstance(
- searchpath, string_types
- ):
+ def __init__(self, searchpath, encoding='utf-8', followlinks=False):
+ if isinstance(searchpath, string_types):
searchpath = [searchpath]
-
- # In Python 3.5, os.path.join doesn't support Path. This can be
- # simplified to list(searchpath) when Python 3.5 is dropped.
- self.searchpath = [fspath(p) for p in searchpath]
-
+ self.searchpath = list(searchpath)
self.encoding = encoding
self.followlinks = followlinks
@@ -192,7 +183,6 @@ def uptodate():
return path.getmtime(filename) == mtime
except OSError:
return False
-
return contents, filename, uptodate
raise TemplateNotFound(template)
@@ -200,14 +190,12 @@ def list_templates(self):
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
- for dirpath, _, filenames in walk_dir:
+ for dirpath, dirnames, filenames in walk_dir:
for filename in filenames:
- template = (
- os.path.join(dirpath, filename)[len(searchpath) :]
- .strip(os.path.sep)
- .replace(os.path.sep, "/")
- )
- if template[:2] == "./":
+ template = os.path.join(dirpath, filename) \
+ [len(searchpath):].strip(os.path.sep) \
+ .replace(os.path.sep, '/')
+ if template[:2] == './':
template = template[2:]
if template not in found:
found.add(template)
@@ -229,11 +217,10 @@ class PackageLoader(BaseLoader):
from the file system and not a zip file.
"""
- def __init__(self, package_name, package_path="templates", encoding="utf-8"):
- from pkg_resources import DefaultProvider
- from pkg_resources import get_provider
- from pkg_resources import ResourceManager
-
+ def __init__(self, package_name, package_path='templates',
+ encoding='utf-8'):
+ from pkg_resources import DefaultProvider, ResourceManager, \
+ get_provider
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
@@ -243,17 +230,14 @@ def __init__(self, package_name, package_path="templates", encoding="utf-8"):
def get_source(self, environment, template):
pieces = split_template_path(template)
- p = "/".join((self.package_path,) + tuple(pieces))
-
+ p = '/'.join((self.package_path,) + tuple(pieces))
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
filename = uptodate = None
-
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
-
def uptodate():
try:
return path.getmtime(filename) == mtime
@@ -265,24 +249,19 @@ def uptodate():
def list_templates(self):
path = self.package_path
-
- if path[:2] == "./":
+ if path[:2] == './':
path = path[2:]
- elif path == ".":
- path = ""
-
+ elif path == '.':
+ path = ''
offset = len(path)
results = []
-
def _walk(path):
for filename in self.provider.resource_listdir(path):
- fullname = path + "/" + filename
-
+ fullname = path + '/' + filename
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
- results.append(fullname[offset:].lstrip("/"))
-
+ results.append(fullname[offset:].lstrip('/'))
_walk(path)
results.sort()
return results
@@ -355,7 +334,7 @@ class PrefixLoader(BaseLoader):
by loading ``'app2/index.html'`` the file from the second.
"""
- def __init__(self, mapping, delimiter="/"):
+ def __init__(self, mapping, delimiter='/'):
self.mapping = mapping
self.delimiter = delimiter
@@ -455,20 +434,19 @@ class ModuleLoader(BaseLoader):
has_source_access = False
def __init__(self, path):
- package_name = "_jinja2_module_templates_%x" % id(self)
+ package_name = '_jinja2_module_templates_%x' % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
-
- if not isinstance(path, abc.Iterable) or isinstance(path, string_types):
+ if isinstance(path, string_types):
path = [path]
+ else:
+ path = list(path)
+ mod.__path__ = path
- mod.__path__ = [fspath(p) for p in path]
-
- sys.modules[package_name] = weakref.proxy(
- mod, lambda x: sys.modules.pop(package_name, None)
- )
+ sys.modules[package_name] = weakref.proxy(mod,
+ lambda x: sys.modules.pop(package_name, None))
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
@@ -478,20 +456,20 @@ def __init__(self, path):
@staticmethod
def get_template_key(name):
- return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()
+ return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
@staticmethod
def get_module_filename(name):
- return ModuleLoader.get_template_key(name) + ".py"
+ return ModuleLoader.get_template_key(name) + '.py'
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
- module = "%s.%s" % (self.package_name, key)
+ module = '%s.%s' % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
- mod = __import__(module, None, None, ["root"])
+ mod = __import__(module, None, None, ['root'])
except ImportError:
raise TemplateNotFound(name)
@@ -500,5 +478,4 @@ def load(self, environment, name, globals=None):
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
- environment, mod.__dict__, globals
- )
+ environment, mod.__dict__, globals)
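To close out loaders.py: the classes above compose through the same constructor API in both versions. A minimal sketch (the "templates" directory and template names are illustrative):

    from jinja2 import ChoiceLoader, DictLoader, Environment, FileSystemLoader

    env = Environment(loader=ChoiceLoader([
        FileSystemLoader("templates"),                      # hypothetical directory
        DictLoader({"fallback.html": "hello {{ name }}"}),  # in-memory fallback
    ]))
    print(env.get_template("fallback.html").render(name="world"))  # hello world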
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/meta.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/meta.py
old mode 100644
new mode 100755
index 3795aace..7421914f
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/meta.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/meta.py
@@ -1,18 +1,25 @@
# -*- coding: utf-8 -*-
-"""Functions that expose information about templates that might be
-interesting for introspection.
"""
-from . import nodes
-from ._compat import iteritems
-from ._compat import string_types
-from .compiler import CodeGenerator
+ jinja2.meta
+ ~~~~~~~~~~~
+
+ This module implements various functions that exposes information about
+ templates that might be interesting for various kinds of applications.
+
+ :copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2 import nodes
+from jinja2.compiler import CodeGenerator
+from jinja2._compat import string_types, iteritems
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment):
-        CodeGenerator.__init__(self, environment, "<introspection>", "<introspection>")
+        CodeGenerator.__init__(self, environment, '<introspection>',
+                                '<introspection>')
self.undeclared_identifiers = set()
def write(self, x):
@@ -22,7 +29,7 @@ def enter_frame(self, frame):
"""Remember all undeclared identifiers."""
CodeGenerator.enter_frame(self, frame)
for _, (action, param) in iteritems(frame.symbols.loads):
- if action == "resolve" and param not in self.environment.globals:
+ if action == 'resolve':
self.undeclared_identifiers.add(param)
@@ -65,9 +72,8 @@ def find_referenced_templates(ast):
This function is useful for dependency tracking. For example if you want
to rebuild parts of the website after a layout template has changed.
"""
- for node in ast.find_all(
- (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
- ):
+ for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
+ nodes.Include)):
if not isinstance(node.template, nodes.Const):
# a tuple with some non consts in there
if isinstance(node.template, (nodes.Tuple, nodes.List)):
@@ -90,9 +96,8 @@ def find_referenced_templates(ast):
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
- elif isinstance(node, nodes.Include) and isinstance(
- node.template.value, (tuple, list)
- ):
+ elif isinstance(node, nodes.Include) and \
+ isinstance(node.template.value, (tuple, list)):
for template_name in node.template.value:
if isinstance(template_name, string_types):
yield template_name
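Note the one real change in meta.py: dropping the `param not in self.environment.globals` guard means names satisfied by Environment.globals (range, dict, lipsum, ...) are reported as undeclared again. A quick sketch of both helpers:

    from jinja2 import Environment, meta

    env = Environment()
    ast = env.parse(
        "{% extends layout %}{% block body %}{{ user.name }}{% endblock %}")
    print(meta.find_undeclared_variables(ast))        # e.g. {'layout', 'user'}
    print(list(meta.find_referenced_templates(ast)))  # [None]: dynamic extends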
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/nativetypes.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/nativetypes.py
old mode 100644
new mode 100755
index a9ead4e2..fe17e413
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/nativetypes.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/nativetypes.py
@@ -1,23 +1,19 @@
+import sys
from ast import literal_eval
-from itertools import chain
-from itertools import islice
-
-from . import nodes
-from ._compat import text_type
-from .compiler import CodeGenerator
-from .compiler import has_safe_repr
-from .environment import Environment
-from .environment import Template
+from itertools import islice, chain
+from jinja2 import nodes
+from jinja2._compat import text_type
+from jinja2.compiler import CodeGenerator, has_safe_repr
+from jinja2.environment import Environment, Template
+from jinja2.utils import concat, escape
def native_concat(nodes):
- """Return a native Python type from the list of compiled nodes. If
- the result is a single node, its value is returned. Otherwise, the
- nodes are concatenated as strings. If the result can be parsed with
- :func:`ast.literal_eval`, the parsed value is returned. Otherwise,
- the string is returned.
-
- :param nodes: Iterable of nodes to concatenate.
+ """Return a native Python type from the list of compiled nodes. If the
+ result is a single node, its value is returned. Otherwise, the nodes are
+ concatenated as strings. If the result can be parsed with
+ :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
+ string is returned.
"""
head = list(islice(nodes, 2))
@@ -25,70 +21,200 @@ def native_concat(nodes):
return None
if len(head) == 1:
- raw = head[0]
+ out = head[0]
else:
- raw = u"".join([text_type(v) for v in chain(head, nodes)])
+ out = u''.join([text_type(v) for v in chain(head, nodes)])
try:
- return literal_eval(raw)
+ return literal_eval(out)
except (ValueError, SyntaxError, MemoryError):
- return raw
+ return out
class NativeCodeGenerator(CodeGenerator):
- """A code generator which renders Python types by not adding
- ``to_string()`` around output nodes.
+ """A code generator which avoids injecting ``to_string()`` calls around the
+ internal code Jinja uses to render templates.
"""
- @staticmethod
- def _default_finalize(value):
- return value
-
- def _output_const_repr(self, group):
- return repr(u"".join([text_type(v) for v in group]))
-
- def _output_child_to_const(self, node, frame, finalize):
- const = node.as_const(frame.eval_ctx)
-
- if not has_safe_repr(const):
- raise nodes.Impossible()
-
- if isinstance(node, nodes.TemplateData):
- return const
-
- return finalize.const(const)
-
- def _output_child_pre(self, node, frame, finalize):
- if finalize.src is not None:
- self.write(finalize.src)
-
- def _output_child_post(self, node, frame, finalize):
- if finalize.src is not None:
- self.write(")")
-
-
-class NativeEnvironment(Environment):
- """An environment that renders templates to native Python types."""
+ def visit_Output(self, node, frame):
+ """Same as :meth:`CodeGenerator.visit_Output`, but do not call
+ ``to_string`` on output nodes in generated code.
+ """
+ if self.has_known_extends and frame.require_output_check:
+ return
+
+ finalize = self.environment.finalize
+ finalize_context = getattr(finalize, 'contextfunction', False)
+ finalize_eval = getattr(finalize, 'evalcontextfunction', False)
+ finalize_env = getattr(finalize, 'environmentfunction', False)
+
+ if finalize is not None:
+ if finalize_context or finalize_eval:
+ const_finalize = None
+ elif finalize_env:
+ def const_finalize(x):
+ return finalize(self.environment, x)
+ else:
+ const_finalize = finalize
+ else:
+ def const_finalize(x):
+ return x
+
+ # If we are inside a frame that requires output checking, we do so.
+ outdent_later = False
+
+ if frame.require_output_check:
+ self.writeline('if parent_template is None:')
+ self.indent()
+ outdent_later = True
+
+ # Try to evaluate as many chunks as possible into a static string at
+ # compile time.
+ body = []
+
+ for child in node.nodes:
+ try:
+ if const_finalize is None:
+ raise nodes.Impossible()
+
+ const = child.as_const(frame.eval_ctx)
+ if not has_safe_repr(const):
+ raise nodes.Impossible()
+ except nodes.Impossible:
+ body.append(child)
+ continue
+
+ # the frame can't be volatile here, because otherwise the as_const
+ # function would raise an Impossible exception at that point
+ try:
+ if frame.eval_ctx.autoescape:
+ if hasattr(const, '__html__'):
+ const = const.__html__()
+ else:
+ const = escape(const)
+
+ const = const_finalize(const)
+ except Exception:
+ # if something goes wrong here we evaluate the node at runtime
+ # for easier debugging
+ body.append(child)
+ continue
+
+ if body and isinstance(body[-1], list):
+ body[-1].append(const)
+ else:
+ body.append([const])
+
+ # if we have less than 3 nodes or a buffer we yield or extend/append
+ if len(body) < 3 or frame.buffer is not None:
+ if frame.buffer is not None:
+ # for one item we append, for more we extend
+ if len(body) == 1:
+ self.writeline('%s.append(' % frame.buffer)
+ else:
+ self.writeline('%s.extend((' % frame.buffer)
+
+ self.indent()
+
+ for item in body:
+ if isinstance(item, list):
+ val = repr(native_concat(item))
+
+ if frame.buffer is None:
+ self.writeline('yield ' + val)
+ else:
+ self.writeline(val + ',')
+ else:
+ if frame.buffer is None:
+ self.writeline('yield ', item)
+ else:
+ self.newline(item)
+
+ close = 0
+
+ if finalize is not None:
+ self.write('environment.finalize(')
+
+ if finalize_context:
+ self.write('context, ')
+
+ close += 1
+
+ self.visit(item, frame)
+
+ if close > 0:
+ self.write(')' * close)
+
+ if frame.buffer is not None:
+ self.write(',')
+
+ if frame.buffer is not None:
+ # close the open parentheses
+ self.outdent()
+ self.writeline(len(body) == 1 and ')' or '))')
+
+ # otherwise we create a format string as this is faster in that case
+ else:
+ format = []
+ arguments = []
+
+ for item in body:
+ if isinstance(item, list):
+ format.append(native_concat(item).replace('%', '%%'))
+ else:
+ format.append('%s')
+ arguments.append(item)
+
+ self.writeline('yield ')
+ self.write(repr(concat(format)) + ' % (')
+ self.indent()
+
+ for argument in arguments:
+ self.newline(argument)
+ close = 0
+
+ if finalize is not None:
+ self.write('environment.finalize(')
+
+ if finalize_context:
+ self.write('context, ')
+ elif finalize_eval:
+ self.write('context.eval_ctx, ')
+ elif finalize_env:
+ self.write('environment, ')
+
+ close += 1
+
+ self.visit(argument, frame)
+ self.write(')' * close + ', ')
+
+ self.outdent()
+ self.writeline(')')
- code_generator_class = NativeCodeGenerator
+ if outdent_later:
+ self.outdent()
class NativeTemplate(Template):
- environment_class = NativeEnvironment
-
def render(self, *args, **kwargs):
- """Render the template to produce a native Python type. If the
- result is a single node, its value is returned. Otherwise, the
- nodes are concatenated as strings. If the result can be parsed
- with :func:`ast.literal_eval`, the parsed value is returned.
- Otherwise, the string is returned.
+ """Render the template to produce a native Python type. If the result
+ is a single node, its value is returned. Otherwise, the nodes are
+ concatenated as strings. If the result can be parsed with
+ :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
+ string is returned.
"""
vars = dict(*args, **kwargs)
try:
return native_concat(self.root_render_func(self.new_context(vars)))
except Exception:
- return self.environment.handle_exception()
+ exc_info = sys.exc_info()
+ return self.environment.handle_exception(exc_info, True)
-NativeEnvironment.template_class = NativeTemplate
+
+class NativeEnvironment(Environment):
+ """An environment that renders templates to native Python types."""
+
+ code_generator_class = NativeCodeGenerator
+ template_class = NativeTemplate
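Despite the large visit_Output rewrite, both implementations aim at the same contract, which is easiest to state as a usage sketch:

    from jinja2.nativetypes import NativeEnvironment

    env = NativeEnvironment()
    result = env.from_string("{{ x + y }}").render(x=4, y=2)
    print(result, type(result))  # 6 <class 'int'> -- a real int, not the string '6'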
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/nodes.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/nodes.py
old mode 100644
new mode 100755
index 95bd614a..4d9a01ad
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/nodes.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/nodes.py
@@ -1,39 +1,54 @@
# -*- coding: utf-8 -*-
-"""AST nodes generated by the parser for the compiler. Also provides
-some node tree helper functions used by the parser and compiler in order
-to normalize nodes.
"""
+ jinja2.nodes
+ ~~~~~~~~~~~~
+
+ This module implements additional nodes derived from the ast base node.
+
+ It also provides some node tree helper functions like `in_lineno` and
+ `get_nodes` used by the parser and translator in order to normalize
+ python and jinja nodes.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import types
import operator
+
from collections import deque
+from jinja2.utils import Markup
+from jinja2._compat import izip, with_metaclass, text_type, PY2
+
-from markupsafe import Markup
+#: the types we support for context functions
+_context_function_types = (types.FunctionType, types.MethodType)
-from ._compat import izip
-from ._compat import PY2
-from ._compat import text_type
-from ._compat import with_metaclass
_binop_to_func = {
- "*": operator.mul,
- "/": operator.truediv,
- "//": operator.floordiv,
- "**": operator.pow,
- "%": operator.mod,
- "+": operator.add,
- "-": operator.sub,
+ '*': operator.mul,
+ '/': operator.truediv,
+ '//': operator.floordiv,
+ '**': operator.pow,
+ '%': operator.mod,
+ '+': operator.add,
+ '-': operator.sub
}
-_uaop_to_func = {"not": operator.not_, "+": operator.pos, "-": operator.neg}
+_uaop_to_func = {
+ 'not': operator.not_,
+ '+': operator.pos,
+ '-': operator.neg
+}
_cmpop_to_func = {
- "eq": operator.eq,
- "ne": operator.ne,
- "gt": operator.gt,
- "gteq": operator.ge,
- "lt": operator.lt,
- "lteq": operator.le,
- "in": lambda a, b: a in b,
- "notin": lambda a, b: a not in b,
+ 'eq': operator.eq,
+ 'ne': operator.ne,
+ 'gt': operator.gt,
+ 'gteq': operator.ge,
+ 'lt': operator.lt,
+ 'lteq': operator.le,
+ 'in': lambda a, b: a in b,
+ 'notin': lambda a, b: a not in b
}
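These operator tables drive compile-time constant folding through Node.as_const. A sketch; the body[0].nodes[0] layout assumes the parser's usual Output wrapping:

    from jinja2 import Environment

    env = Environment()
    ast = env.parse("{{ 1 + 2 ** 3 }}")
    add_node = ast.body[0].nodes[0]  # Output -> Add(Const(1), Pow(Const(2), Const(3)))
    print(add_node.as_const())       # 9, folded via _binop_to_func['+'] and ['**']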
@@ -46,16 +61,16 @@ class NodeType(type):
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
- def __new__(mcs, name, bases, d):
- for attr in "fields", "attributes":
+ def __new__(cls, name, bases, d):
+ for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
- assert len(bases) == 1, "multiple inheritance not allowed"
- assert len(storage) == len(set(storage)), "layout conflict"
+ assert len(bases) == 1, 'multiple inheritance not allowed'
+ assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
- d.setdefault("abstract", False)
- return type.__new__(mcs, name, bases, d)
+ d.setdefault('abstract', False)
+ return type.__new__(cls, name, bases, d)
class EvalContext(object):
@@ -82,17 +97,15 @@ def revert(self, old):
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
- raise RuntimeError(
- "if no eval context is passed, the "
- "node must have an attached "
- "environment."
- )
+ raise RuntimeError('if no eval context is passed, the '
+ 'node must have an attached '
+ 'environment.')
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
- """Baseclass for all Jinja nodes. There are a number of nodes available
+ """Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
@@ -107,32 +120,30 @@ class Node(with_metaclass(NodeType, object)):
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
-
fields = ()
- attributes = ("lineno", "environment")
+ attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
- raise TypeError("abstract nodes are not instantiable")
+            raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
- raise TypeError("%r takes 0 arguments" % self.__class__.__name__)
- raise TypeError(
- "%r takes 0 or %d argument%s"
- % (
- self.__class__.__name__,
- len(self.fields),
- len(self.fields) != 1 and "s" or "",
- )
- )
+ raise TypeError('%r takes 0 arguments' %
+ self.__class__.__name__)
+ raise TypeError('%r takes 0 or %d argument%s' % (
+ self.__class__.__name__,
+ len(self.fields),
+ len(self.fields) != 1 and 's' or ''
+ ))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
- raise TypeError("unknown attribute %r" % next(iter(attributes)))
+ raise TypeError('unknown attribute %r' %
+ next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
@@ -142,11 +153,9 @@ def iter_fields(self, exclude=None, only=None):
should be sets or tuples of field names.
"""
for name in self.fields:
- if (
- (exclude is only is None)
- or (exclude is not None and name not in exclude)
- or (only is not None and name in only)
- ):
+ if (exclude is only is None) or \
+ (exclude is not None and name not in exclude) or \
+ (only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
@@ -157,7 +166,7 @@ def iter_child_nodes(self, exclude=None, only=None):
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
- for _, item in self.iter_fields(exclude, only):
+ for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
@@ -191,7 +200,7 @@ def set_ctx(self, ctx):
todo = deque([self])
while todo:
node = todo.popleft()
- if "ctx" in node.fields:
+ if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
@@ -201,7 +210,7 @@ def set_lineno(self, lineno, override=False):
todo = deque([self])
while todo:
node = todo.popleft()
- if "lineno" in node.attributes:
+ if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
@@ -217,9 +226,8 @@ def set_environment(self, environment):
return self
def __eq__(self, other):
- return type(self) is type(other) and tuple(self.iter_fields()) == tuple(
- other.iter_fields()
- )
+ return type(self) is type(other) and \
+ tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
@@ -228,9 +236,10 @@ def __ne__(self, other):
__hash__ = object.__hash__
def __repr__(self):
- return "%s(%s)" % (
+ return '%s(%s)' % (
self.__class__.__name__,
- ", ".join("%s=%r" % (arg, getattr(self, arg, None)) for arg in self.fields),
+ ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
+ arg in self.fields)
)
def dump(self):
@@ -239,39 +248,37 @@ def _dump(node):
buf.append(repr(node))
return
- buf.append("nodes.%s(" % node.__class__.__name__)
+ buf.append('nodes.%s(' % node.__class__.__name__)
if not node.fields:
- buf.append(")")
+ buf.append(')')
return
for idx, field in enumerate(node.fields):
if idx:
- buf.append(", ")
+ buf.append(', ')
value = getattr(node, field)
if isinstance(value, list):
- buf.append("[")
+ buf.append('[')
for idx, item in enumerate(value):
if idx:
- buf.append(", ")
+ buf.append(', ')
_dump(item)
- buf.append("]")
+ buf.append(']')
else:
_dump(value)
- buf.append(")")
-
+ buf.append(')')
buf = []
_dump(self)
- return "".join(buf)
+ return ''.join(buf)
+
class Stmt(Node):
"""Base node for all statements."""
-
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
-
abstract = True
@@ -279,22 +286,19 @@ class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
-
- fields = ("body",)
+ fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
-
- fields = ("nodes",)
+ fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
-
- fields = ("template",)
+ fields = ('template',)
class For(Stmt):
@@ -305,14 +309,12 @@ class For(Stmt):
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
-
- fields = ("target", "iter", "body", "else_", "test", "recursive")
+ fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
-
- fields = ("test", "body", "elif_", "else_")
+ fields = ('test', 'body', 'elif_', 'else_')
class Macro(Stmt):
@@ -320,22 +322,19 @@ class Macro(Stmt):
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
-
- fields = ("name", "args", "defaults", "body")
+ fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
-
- fields = ("call", "args", "defaults", "body")
+ fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
-
- fields = ("body", "filter")
+ fields = ('body', 'filter')
class With(Stmt):
@@ -344,26 +343,22 @@ class With(Stmt):
.. versionadded:: 2.9.3
"""
-
- fields = ("targets", "values", "body")
+ fields = ('targets', 'values', 'body')
class Block(Stmt):
"""A node that represents a block."""
-
- fields = ("name", "body", "scoped")
+ fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
-
- fields = ("template", "with_context", "ignore_missing")
+ fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
-
- fields = ("template", "target", "with_context")
+ fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
@@ -377,31 +372,26 @@ class FromImport(Stmt):
The list of names may contain tuples if aliases are wanted.
"""
-
- fields = ("template", "names", "with_context")
+ fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
-
- fields = ("node",)
+ fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
-
- fields = ("target", "node")
+ fields = ('target', 'node')
class AssignBlock(Stmt):
"""Assigns a block to a target."""
-
- fields = ("target", "filter", "body")
+ fields = ('target', 'filter', 'body')
class Expr(Node):
"""Baseclass for all expressions."""
-
abstract = True
def as_const(self, eval_ctx=None):
@@ -424,18 +414,15 @@ def can_assign(self):
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
-
- fields = ("left", "right")
+ fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
- if (
- self.environment.sandboxed
- and self.operator in self.environment.intercepted_binops
- ):
+ if self.environment.sandboxed and \
+ self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
@@ -446,18 +433,15 @@ def as_const(self, eval_ctx=None):
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
-
- fields = ("node",)
+ fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
- if (
- self.environment.sandboxed
- and self.operator in self.environment.intercepted_unops
- ):
+ if self.environment.sandboxed and \
+ self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
@@ -474,17 +458,16 @@ class Name(Expr):
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
-
- fields = ("name", "ctx")
+ fields = ('name', 'ctx')
def can_assign(self):
- return self.name not in ("true", "false", "none", "True", "False", "None")
+ return self.name not in ('true', 'false', 'none',
+ 'True', 'False', 'None')
class NSRef(Expr):
"""Reference to a namespace value assignment"""
-
- fields = ("name", "attr")
+ fields = ('name', 'attr')
def can_assign(self):
# We don't need any special checks here; NSRef assignments have a
@@ -496,7 +479,6 @@ def can_assign(self):
class Literal(Expr):
"""Baseclass for literals."""
-
abstract = True
@@ -506,18 +488,14 @@ class Const(Literal):
complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true) are supported.
"""
-
- fields = ("value",)
+ fields = ('value',)
def as_const(self, eval_ctx=None):
rv = self.value
- if (
- PY2
- and type(rv) is text_type
- and self.environment.policies["compiler.ascii_str"]
- ):
+ if PY2 and type(rv) is text_type and \
+ self.environment.policies['compiler.ascii_str']:
try:
- rv = rv.encode("ascii")
+ rv = rv.encode('ascii')
except UnicodeError:
pass
return rv
@@ -529,7 +507,6 @@ def from_untrusted(cls, value, lineno=None, environment=None):
an `Impossible` exception.
"""
from .compiler import has_safe_repr
-
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
@@ -537,8 +514,7 @@ def from_untrusted(cls, value, lineno=None, environment=None):
class TemplateData(Literal):
"""A constant template string."""
-
- fields = ("data",)
+ fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -554,8 +530,7 @@ class Tuple(Literal):
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
-
- fields = ("items", "ctx")
+ fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -570,8 +545,7 @@ def can_assign(self):
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
-
- fields = ("items",)
+ fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -582,8 +556,7 @@ class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
-
- fields = ("items",)
+ fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -592,8 +565,7 @@ def as_const(self, eval_ctx=None):
class Pair(Helper):
"""A key, value pair for dicts."""
-
- fields = ("key", "value")
+ fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -602,8 +574,7 @@ def as_const(self, eval_ctx=None):
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
-
- fields = ("key", "value")
+ fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -614,8 +585,7 @@ class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
-
- fields = ("test", "expr1", "expr2")
+ fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -656,7 +626,7 @@ class Filter(Expr):
filtered. Buffers are created by macros and filter blocks.
"""
- fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
+ fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -666,27 +636,28 @@ def as_const(self, eval_ctx=None):
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
        # call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
- if filter_ is None or getattr(filter_, "contextfilter", False) is True:
+ if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
# We cannot constant handle async filters, so we need to make sure
# to not go down this path.
- if eval_ctx.environment.is_async and getattr(
- filter_, "asyncfiltervariant", False
+ if (
+ eval_ctx.environment.is_async
+ and getattr(filter_, 'asyncfiltervariant', False)
):
raise Impossible()
args, kwargs = args_as_const(self, eval_ctx)
args.insert(0, self.node.as_const(eval_ctx))
- if getattr(filter_, "evalcontextfilter", False) is True:
+ if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
- elif getattr(filter_, "environmentfilter", False) is True:
+ elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
try:
@@ -700,7 +671,7 @@ class Test(Expr):
rest of the fields are the same as for :class:`Call`.
"""
- fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
+ fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
test = self.environment.tests.get(self.name)
@@ -725,23 +696,20 @@ class Call(Expr):
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
-
- fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs")
+ fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
-
- fields = ("node", "arg", "ctx")
+ fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
- if self.ctx != "load":
+ if self.ctx != 'load':
raise Impossible()
try:
- return self.environment.getitem(
- self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx)
- )
+ return self.environment.getitem(self.node.as_const(eval_ctx),
+ self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
@@ -753,15 +721,15 @@ class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
-
- fields = ("node", "attr", "ctx")
+ fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
- if self.ctx != "load":
+ if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
- return self.environment.getattr(self.node.as_const(eval_ctx), self.attr)
+ return self.environment.getattr(self.node.as_const(eval_ctx),
+ self.attr)
except Exception:
raise Impossible()
@@ -773,17 +741,14 @@ class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
-
- fields = ("start", "stop", "step")
+ fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
-
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
-
return slice(const(self.start), const(self.stop), const(self.step))
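+    # Illustrative: Slice(Const(1), Const(5), None).as_const() evaluates
+    # to slice(1, 5, None).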
@@ -791,103 +756,82 @@ class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
-
- fields = ("nodes",)
+ fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
- return "".join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
+ return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\\s.
"""
-
- fields = ("expr", "ops")
+ fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
-
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
-
- if not result:
- return False
-
value = new_value
except Exception:
raise Impossible()
-
return result
class Operand(Helper):
"""Holds an operator and an expression."""
-
- fields = ("op", "expr")
-
+ fields = ('op', 'expr')
if __debug__:
- Operand.__doc__ += "\nThe following operators are available: " + ", ".join(
- sorted(
- "``%s``" % x
- for x in set(_binop_to_func) | set(_uaop_to_func) | set(_cmpop_to_func)
- )
- )
+ Operand.__doc__ += '\nThe following operators are available: ' + \
+ ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
+ set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
-
- operator = "*"
+ operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
-
- operator = "/"
+ operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
-
- operator = "//"
+ operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
-
- operator = "+"
+ operator = '+'
class Sub(BinExpr):
"""Subtract the right from the left node."""
-
- operator = "-"
+ operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
-
- operator = "%"
+ operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
-
- operator = "**"
+ operator = '**'
class And(BinExpr):
"""Short circuited AND."""
-
- operator = "and"
+ operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -896,8 +840,7 @@ def as_const(self, eval_ctx=None):
class Or(BinExpr):
"""Short circuited OR."""
-
- operator = "or"
+ operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -906,20 +849,17 @@ def as_const(self, eval_ctx=None):
class Not(UnaryExpr):
"""Negate the expression."""
-
- operator = "not"
+ operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
-
- operator = "-"
+ operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
-
- operator = "+"
+ operator = '+'
# Helpers for extensions
@@ -929,8 +869,7 @@ class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
-
- fields = ("name",)
+ fields = ('name',)
class ExtensionAttribute(Expr):
@@ -940,8 +879,7 @@ class ExtensionAttribute(Expr):
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
-
- fields = ("identifier", "name")
+ fields = ('identifier', 'name')
class ImportedName(Expr):
@@ -950,8 +888,7 @@ class ImportedName(Expr):
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
-
- fields = ("importname",)
+ fields = ('importname',)
class InternalName(Expr):
@@ -961,20 +898,16 @@ class InternalName(Expr):
a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
"""
-
- fields = ("name",)
+ fields = ('name',)
def __init__(self):
- raise TypeError(
- "Can't create internal names. Use the "
- "`free_identifier` method on a parser."
- )
+ raise TypeError('Can\'t create internal names. Use the '
+ '`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
-
- fields = ("expr",)
+ fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -987,8 +920,7 @@ class MarkSafeIfAutoescape(Expr):
.. versionadded:: 2.5
"""
-
- fields = ("expr",)
+ fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -1010,20 +942,6 @@ class ContextReference(Expr):
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
-
- This is basically equivalent to using the
- :func:`~jinja2.contextfunction` decorator when using the
- high-level API, which causes a reference to the context to be passed
- as the first argument to a function.
- """
-
-
-class DerivedContextReference(Expr):
- """Return the current template context including locals. Behaves
- exactly like :class:`ContextReference`, but includes local
- variables, such as from a ``for`` loop.
-
- .. versionadded:: 2.11
"""
@@ -1037,8 +955,7 @@ class Break(Stmt):
class Scope(Stmt):
"""An artificial scope."""
-
- fields = ("body",)
+ fields = ('body',)
class OverlayScope(Stmt):
@@ -1054,8 +971,7 @@ class OverlayScope(Stmt):
.. versionadded:: 2.10
"""
-
- fields = ("context", "body")
+ fields = ('context', 'body')
class EvalContextModifier(Stmt):
@@ -1066,8 +982,7 @@ class EvalContextModifier(Stmt):
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
-
- fields = ("options",)
+ fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
@@ -1075,14 +990,10 @@ class ScopedEvalContextModifier(EvalContextModifier):
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
-
- fields = ("body",)
+ fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
- raise TypeError("can't create custom node types")
-
-
-NodeType.__new__ = staticmethod(_failing_new)
-del _failing_new
+ raise TypeError('can\'t create custom node types')
+NodeType.__new__ = staticmethod(_failing_new)
+del _failing_new
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/optimizer.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/optimizer.py
old mode 100644
new mode 100755
index 7bc78c45..65ab3ceb
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/optimizer.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/optimizer.py
@@ -1,15 +1,23 @@
# -*- coding: utf-8 -*-
-"""The optimizer tries to constant fold expressions and modify the AST
-in place so that it should be faster to evaluate.
-
-Because the AST does not contain all the scoping information and the
-compiler has to find that out, we cannot do all the optimizations we
-want. For example, loop unrolling doesn't work because unrolled loops
-would have a different scope. The solution would be a second syntax tree
-that stored the scoping rules.
"""
-from . import nodes
-from .visitor import NodeTransformer
+ jinja2.optimizer
+ ~~~~~~~~~~~~~~~~
+
+ The jinja optimizer is currently trying to constant fold a few expressions
+ and modify the AST in place so that it should be easier to evaluate it.
+
+ Because the AST does not contain all the scoping information and the
+ compiler has to find that out, we cannot do all the optimizations we
+    want. For example, loop unrolling doesn't work because unrolled loops would
+ have a different scoping.
+
+ The solution would be a second syntax tree that has the scoping rules stored.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
+"""
+from jinja2 import nodes
+from jinja2.visitor import NodeTransformer
def optimize(node, environment):
@@ -20,22 +28,22 @@ def optimize(node, environment):
class Optimizer(NodeTransformer):
+
def __init__(self, environment):
self.environment = environment
- def generic_visit(self, node, *args, **kwargs):
- node = super(Optimizer, self).generic_visit(node, *args, **kwargs)
-
- # Do constant folding. Some other nodes besides Expr have
- # as_const, but folding them causes errors later on.
- if isinstance(node, nodes.Expr):
- try:
- return nodes.Const.from_untrusted(
- node.as_const(args[0] if args else None),
- lineno=node.lineno,
- environment=self.environment,
- )
- except nodes.Impossible:
- pass
-
- return node
+ def fold(self, node, eval_ctx=None):
+ """Do constant folding."""
+ node = self.generic_visit(node)
+ try:
+ return nodes.Const.from_untrusted(node.as_const(eval_ctx),
+ lineno=node.lineno,
+ environment=self.environment)
+ except nodes.Impossible:
+ return node
+
+ visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
+ visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
+ visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
+ visit_Filter = visit_Test = visit_CondExpr = fold
+ del fold
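+
+# Illustrative effect (template strings are made-up examples): in a plain
+# Environment, '{{ 1 + 2 }}' is folded at compile time into Const(3),
+# while '{{ a + 2 }}' is left untouched because 'a' is unknown until
+# render time and as_const() raises Impossible for it.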
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/parser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/parser.py
old mode 100644
new mode 100755
index d5881066..ed00d970
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/parser.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/parser.py
@@ -1,46 +1,41 @@
# -*- coding: utf-8 -*-
-"""Parse tokens from the lexer into nodes for the compiler."""
-from . import nodes
-from ._compat import imap
-from .exceptions import TemplateAssertionError
-from .exceptions import TemplateSyntaxError
-from .lexer import describe_token
-from .lexer import describe_token_expr
-
-_statement_keywords = frozenset(
- [
- "for",
- "if",
- "block",
- "extends",
- "print",
- "macro",
- "include",
- "from",
- "import",
- "set",
- "with",
- "autoescape",
- ]
-)
-_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"])
+"""
+ jinja2.parser
+ ~~~~~~~~~~~~~
+
+ Implements the template parser.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2 import nodes
+from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
+from jinja2.lexer import describe_token, describe_token_expr
+from jinja2._compat import imap
+
+
+_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
+ 'macro', 'include', 'from', 'import',
+ 'set', 'with', 'autoescape'])
+_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
_math_nodes = {
- "add": nodes.Add,
- "sub": nodes.Sub,
- "mul": nodes.Mul,
- "div": nodes.Div,
- "floordiv": nodes.FloorDiv,
- "mod": nodes.Mod,
+ 'add': nodes.Add,
+ 'sub': nodes.Sub,
+ 'mul': nodes.Mul,
+ 'div': nodes.Div,
+ 'floordiv': nodes.FloorDiv,
+ 'mod': nodes.Mod,
}
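+
+# Illustrative: for the source '1 + 2 * 3' the lexer emits 'add' and 'mul'
+# tokens, and parse_math1/parse_math2 look them up here to build
+# Add(Const(1), Mul(Const(2), Const(3))).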
class Parser(object):
- """This is the central parsing class Jinja uses. It's passed to
+ """This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
- def __init__(self, environment, source, name=None, filename=None, state=None):
+ def __init__(self, environment, source, name=None, filename=None,
+ state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
@@ -68,37 +63,31 @@ def _fail_ut_eof(self, name, end_token_stack, lineno):
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
- currently_looking = " or ".join(
- "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]
- )
+ currently_looking = ' or '.join(
+ "'%s'" % describe_token_expr(expr)
+ for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
- message = ["Unexpected end of template."]
+ message = ['Unexpected end of template.']
else:
- message = ["Encountered unknown tag '%s'." % name]
+ message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
- message.append(
- "You probably made a nesting mistake. Jinja "
- "is expecting this tag, but currently looking "
- "for %s." % currently_looking
- )
+ message.append('You probably made a nesting mistake. Jinja '
+ 'is expecting this tag, but currently looking '
+ 'for %s.' % currently_looking)
else:
- message.append(
- "Jinja was looking for the following tags: "
- "%s." % currently_looking
- )
+ message.append('Jinja was looking for the following tags: '
+ '%s.' % currently_looking)
if self._tag_stack:
- message.append(
- "The innermost block that needs to be "
- "closed is '%s'." % self._tag_stack[-1]
- )
+ message.append('The innermost block that needs to be '
+ 'closed is \'%s\'.' % self._tag_stack[-1])
- self.fail(" ".join(message), lineno)
+ self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
@@ -116,7 +105,7 @@ def fail_eof(self, end_tokens=None, lineno=None):
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
- if self.stream.current.type in ("variable_end", "block_end", "rparen"):
+ if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
@@ -126,22 +115,22 @@ def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
- nodes.Node.__init__(rv, "fi%d" % self._last_identifier, lineno=lineno)
+ nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
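+    # Illustrative: successive calls return InternalName nodes named
+    # 'fi1', 'fi2', ...; these can never collide with template variables
+    # because they are not reachable from template syntax.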
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
- if token.type != "name":
- self.fail("tag name expected", token.lineno)
+ if token.type != 'name':
+ self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
- return getattr(self, "parse_" + self.stream.current.value)()
- if token.value == "call":
+ return getattr(self, 'parse_' + self.stream.current.value)()
+ if token.value == 'call':
return self.parse_call_block()
- if token.value == "filter":
+ if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
@@ -168,16 +157,16 @@ def parse_statements(self, end_tokens, drop_needle=False):
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
- self.stream.skip_if("colon")
+ self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
- self.stream.expect("block_end")
+ self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
- if self.stream.current.type == "eof":
+ if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
@@ -188,47 +177,50 @@ def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target(with_namespace=True)
- if self.stream.skip_if("assign"):
+ if self.stream.skip_if('assign'):
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
filter_node = self.parse_filter(None)
- body = self.parse_statements(("name:endset",), drop_needle=True)
+ body = self.parse_statements(('name:endset',),
+ drop_needle=True)
return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
- lineno = self.stream.expect("name:for").lineno
- target = self.parse_assign_target(extra_end_rules=("name:in",))
- self.stream.expect("name:in")
- iter = self.parse_tuple(
- with_condexpr=False, extra_end_rules=("name:recursive",)
- )
+ lineno = self.stream.expect('name:for').lineno
+ target = self.parse_assign_target(extra_end_rules=('name:in',))
+ self.stream.expect('name:in')
+ iter = self.parse_tuple(with_condexpr=False,
+ extra_end_rules=('name:recursive',))
test = None
- if self.stream.skip_if("name:if"):
+ if self.stream.skip_if('name:if'):
test = self.parse_expression()
- recursive = self.stream.skip_if("name:recursive")
- body = self.parse_statements(("name:endfor", "name:else"))
- if next(self.stream).value == "endfor":
+ recursive = self.stream.skip_if('name:recursive')
+ body = self.parse_statements(('name:endfor', 'name:else'))
+ if next(self.stream).value == 'endfor':
else_ = []
else:
- else_ = self.parse_statements(("name:endfor",), drop_needle=True)
- return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno)
+ else_ = self.parse_statements(('name:endfor',), drop_needle=True)
+ return nodes.For(target, iter, body, else_, test,
+ recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
- node = result = nodes.If(lineno=self.stream.expect("name:if").lineno)
+ node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
- node.body = self.parse_statements(("name:elif", "name:else", "name:endif"))
+ node.body = self.parse_statements(('name:elif', 'name:else',
+ 'name:endif'))
node.elif_ = []
node.else_ = []
token = next(self.stream)
- if token.test("name:elif"):
+ if token.test('name:elif'):
node = nodes.If(lineno=self.stream.current.lineno)
result.elif_.append(node)
continue
- elif token.test("name:else"):
- result.else_ = self.parse_statements(("name:endif",), drop_needle=True)
+ elif token.test('name:else'):
+ result.else_ = self.parse_statements(('name:endif',),
+ drop_needle=True)
break
return result
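+    # Illustrative: '{% if a %}x{% elif b %}y{% else %}z{% endif %}' parses
+    # to an If node whose elif_ list holds a nested If for 'b' and whose
+    # else_ holds the output nodes for 'z'.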
@@ -236,42 +228,45 @@ def parse_with(self):
node = nodes.With(lineno=next(self.stream).lineno)
targets = []
values = []
- while self.stream.current.type != "block_end":
+ while self.stream.current.type != 'block_end':
+ lineno = self.stream.current.lineno
if targets:
- self.stream.expect("comma")
+ self.stream.expect('comma')
target = self.parse_assign_target()
- target.set_ctx("param")
+ target.set_ctx('param')
targets.append(target)
- self.stream.expect("assign")
+ self.stream.expect('assign')
values.append(self.parse_expression())
node.targets = targets
node.values = values
- node.body = self.parse_statements(("name:endwith",), drop_needle=True)
+ node.body = self.parse_statements(('name:endwith',),
+ drop_needle=True)
return node
def parse_autoescape(self):
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
- node.options = [nodes.Keyword("autoescape", self.parse_expression())]
- node.body = self.parse_statements(("name:endautoescape",), drop_needle=True)
+ node.options = [
+ nodes.Keyword('autoescape', self.parse_expression())
+ ]
+ node.body = self.parse_statements(('name:endautoescape',),
+ drop_needle=True)
return nodes.Scope([node])
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
- node.name = self.stream.expect("name").value
- node.scoped = self.stream.skip_if("name:scoped")
+ node.name = self.stream.expect('name').value
+ node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
- if self.stream.current.type == "sub":
- self.fail(
- "Block names in Jinja have to be valid Python "
- "identifiers and may not contain hyphens, use an "
- "underscore instead."
- )
-
- node.body = self.parse_statements(("name:endblock",), drop_needle=True)
- self.stream.skip_if("name:" + node.name)
+ if self.stream.current.type == 'sub':
+ self.fail('Block names in Jinja have to be valid Python '
+ 'identifiers and may not contain hyphens, use an '
+ 'underscore instead.')
+
+ node.body = self.parse_statements(('name:endblock',), drop_needle=True)
+ self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
@@ -280,10 +275,9 @@ def parse_extends(self):
return node
def parse_import_context(self, node, default):
- if self.stream.current.test_any(
- "name:with", "name:without"
- ) and self.stream.look().test("name:context"):
- node.with_context = next(self.stream).value == "with"
+ if self.stream.current.test_any('name:with', 'name:without') and \
+ self.stream.look().test('name:context'):
+ node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
@@ -292,9 +286,8 @@ def parse_import_context(self, node, default):
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
- if self.stream.current.test("name:ignore") and self.stream.look().test(
- "name:missing"
- ):
+ if self.stream.current.test('name:ignore') and \
+ self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
@@ -304,71 +297,67 @@ def parse_include(self):
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
- self.stream.expect("name:as")
+ self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
- self.stream.expect("name:import")
+ self.stream.expect('name:import')
node.names = []
def parse_context():
- if self.stream.current.value in (
- "with",
- "without",
- ) and self.stream.look().test("name:context"):
- node.with_context = next(self.stream).value == "with"
+ if self.stream.current.value in ('with', 'without') and \
+ self.stream.look().test('name:context'):
+ node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
- self.stream.expect("comma")
- if self.stream.current.type == "name":
+ self.stream.expect('comma')
+ if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
- if target.name.startswith("_"):
- self.fail(
- "names starting with an underline can not be imported",
- target.lineno,
- exc=TemplateAssertionError,
- )
- if self.stream.skip_if("name:as"):
+ if target.name.startswith('_'):
+ self.fail('names starting with an underline can not '
+ 'be imported', target.lineno,
+ exc=TemplateAssertionError)
+ if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
- if parse_context() or self.stream.current.type != "comma":
+ if parse_context() or self.stream.current.type != 'comma':
break
else:
- self.stream.expect("name")
- if not hasattr(node, "with_context"):
+ self.stream.expect('name')
+ if not hasattr(node, 'with_context'):
node.with_context = False
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
- self.stream.expect("lparen")
- while self.stream.current.type != "rparen":
+ self.stream.expect('lparen')
+ while self.stream.current.type != 'rparen':
if args:
- self.stream.expect("comma")
+ self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
- arg.set_ctx("param")
- if self.stream.skip_if("assign"):
+ arg.set_ctx('param')
+ if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
elif defaults:
- self.fail("non-default argument follows default argument")
+ self.fail('non-default argument follows default argument')
args.append(arg)
- self.stream.expect("rparen")
+ self.stream.expect('rparen')
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
- if self.stream.current.type == "lparen":
+ if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
@@ -376,40 +365,37 @@ def parse_call_block(self):
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
- self.fail("expected call", node.lineno)
- node.body = self.parse_statements(("name:endcall",), drop_needle=True)
+ self.fail('expected call', node.lineno)
+ node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
- node.body = self.parse_statements(("name:endfilter",), drop_needle=True)
+ node.body = self.parse_statements(('name:endfilter',),
+ drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
- node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
+ node.body = self.parse_statements(('name:endmacro',),
+ drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
- while self.stream.current.type != "block_end":
+ while self.stream.current.type != 'block_end':
if node.nodes:
- self.stream.expect("comma")
+ self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
- def parse_assign_target(
- self,
- with_tuple=True,
- name_only=False,
- extra_end_rules=None,
- with_namespace=False,
- ):
- """Parse an assignment target. As Jinja allows assignments to
+ def parse_assign_target(self, with_tuple=True, name_only=False,
+ extra_end_rules=None, with_namespace=False):
+ """Parse an assignment target. As Jinja2 allows assignments to
    tuples, this function can parse all allowed assignment targets. By
    default assignments to tuples are parsed; that can be disabled, however,
by setting `with_tuple` to `False`. If only assignments to names are
@@ -417,26 +403,24 @@ def parse_assign_target(
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
"""
- if with_namespace and self.stream.look().type == "dot":
- token = self.stream.expect("name")
+ if with_namespace and self.stream.look().type == 'dot':
+ token = self.stream.expect('name')
next(self.stream) # dot
- attr = self.stream.expect("name")
+ attr = self.stream.expect('name')
target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
elif name_only:
- token = self.stream.expect("name")
- target = nodes.Name(token.value, "store", lineno=token.lineno)
+ token = self.stream.expect('name')
+ target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
- target = self.parse_tuple(
- simplified=True, extra_end_rules=extra_end_rules
- )
+ target = self.parse_tuple(simplified=True,
+ extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
- target.set_ctx("store")
+ target.set_ctx('store')
if not target.can_assign():
- self.fail(
- "can't assign to %r" % target.__class__.__name__.lower(), target.lineno
- )
+            self.fail('can\'t assign to %r' %
+                      target.__class__.__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
@@ -451,9 +435,9 @@ def parse_expression(self, with_condexpr=True):
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
- while self.stream.skip_if("name:if"):
+ while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
- if self.stream.skip_if("name:else"):
+ if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
@@ -464,7 +448,7 @@ def parse_condexpr(self):
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
- while self.stream.skip_if("name:or"):
+ while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
@@ -473,14 +457,14 @@ def parse_or(self):
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
- while self.stream.skip_if("name:and"):
+ while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
- if self.stream.current.test("name:not"):
+ if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
@@ -494,13 +478,12 @@ def parse_compare(self):
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_math1()))
- elif self.stream.skip_if("name:in"):
- ops.append(nodes.Operand("in", self.parse_math1()))
- elif self.stream.current.test("name:not") and self.stream.look().test(
- "name:in"
- ):
+ elif self.stream.skip_if('name:in'):
+ ops.append(nodes.Operand('in', self.parse_math1()))
+ elif (self.stream.current.test('name:not') and
+ self.stream.look().test('name:in')):
self.stream.skip(2)
- ops.append(nodes.Operand("notin", self.parse_math1()))
+ ops.append(nodes.Operand('notin', self.parse_math1()))
else:
break
lineno = self.stream.current.lineno
@@ -511,7 +494,7 @@ def parse_compare(self):
def parse_math1(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
- while self.stream.current.type in ("add", "sub"):
+ while self.stream.current.type in ('add', 'sub'):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_concat()
@@ -522,7 +505,7 @@ def parse_math1(self):
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_math2()]
- while self.stream.current.type == "tilde":
+ while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_math2())
if len(args) == 1:
@@ -532,7 +515,7 @@ def parse_concat(self):
def parse_math2(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
- while self.stream.current.type in ("mul", "div", "floordiv", "mod"):
+ while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_pow()
@@ -543,7 +526,7 @@ def parse_math2(self):
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
- while self.stream.current.type == "pow":
+ while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
@@ -553,10 +536,10 @@ def parse_pow(self):
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
- if token_type == "sub":
+ if token_type == 'sub':
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
- elif token_type == "add":
+ elif token_type == 'add':
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
@@ -568,44 +551,40 @@ def parse_unary(self, with_filter=True):
def parse_primary(self):
token = self.stream.current
- if token.type == "name":
- if token.value in ("true", "false", "True", "False"):
- node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno)
- elif token.value in ("none", "None"):
+ if token.type == 'name':
+ if token.value in ('true', 'false', 'True', 'False'):
+ node = nodes.Const(token.value in ('true', 'True'),
+ lineno=token.lineno)
+ elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
- node = nodes.Name(token.value, "load", lineno=token.lineno)
+ node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
- elif token.type == "string":
+ elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
- while self.stream.current.type == "string":
+ while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
- node = nodes.Const("".join(buf), lineno=lineno)
- elif token.type in ("integer", "float"):
+ node = nodes.Const(''.join(buf), lineno=lineno)
+ elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
- elif token.type == "lparen":
+ elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
- self.stream.expect("rparen")
- elif token.type == "lbracket":
+ self.stream.expect('rparen')
+ elif token.type == 'lbracket':
node = self.parse_list()
- elif token.type == "lbrace":
+ elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
- def parse_tuple(
- self,
- simplified=False,
- with_condexpr=True,
- extra_end_rules=None,
- explicit_parentheses=False,
- ):
+ def parse_tuple(self, simplified=False, with_condexpr=True,
+ extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
    delimited by a comma, a :class:`~jinja2.nodes.Tuple` node is created.
    This method can also return a single expression instead of a tuple
@@ -630,19 +609,16 @@ def parse_tuple(
elif with_condexpr:
parse = self.parse_expression
else:
-
- def parse():
- return self.parse_expression(with_condexpr=False)
-
+ parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
- self.stream.expect("comma")
+ self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
- if self.stream.current.type == "comma":
+ if self.stream.current.type == 'comma':
is_tuple = True
else:
break
@@ -657,48 +633,46 @@ def parse():
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
- self.fail(
- "Expected an expression, got '%s'"
- % describe_token(self.stream.current)
- )
+ self.fail('Expected an expression, got \'%s\'' %
+ describe_token(self.stream.current))
- return nodes.Tuple(args, "load", lineno=lineno)
+ return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
- token = self.stream.expect("lbracket")
+ token = self.stream.expect('lbracket')
items = []
- while self.stream.current.type != "rbracket":
+ while self.stream.current.type != 'rbracket':
if items:
- self.stream.expect("comma")
- if self.stream.current.type == "rbracket":
+ self.stream.expect('comma')
+ if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
- self.stream.expect("rbracket")
+ self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
- token = self.stream.expect("lbrace")
+ token = self.stream.expect('lbrace')
items = []
- while self.stream.current.type != "rbrace":
+ while self.stream.current.type != 'rbrace':
if items:
- self.stream.expect("comma")
- if self.stream.current.type == "rbrace":
+ self.stream.expect('comma')
+ if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
- self.stream.expect("colon")
+ self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
- self.stream.expect("rbrace")
+ self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
- if token_type == "dot" or token_type == "lbracket":
+ if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
- elif token_type == "lparen":
+ elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
@@ -707,13 +681,13 @@ def parse_postfix(self, node):
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
- if token_type == "pipe":
+ if token_type == 'pipe':
node = self.parse_filter(node)
- elif token_type == "name" and self.stream.current.value == "is":
+ elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
- elif token_type == "lparen":
+ elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
@@ -721,54 +695,53 @@ def parse_filter_expr(self, node):
def parse_subscript(self, node):
token = next(self.stream)
- if token.type == "dot":
+ if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
- if attr_token.type == "name":
- return nodes.Getattr(
- node, attr_token.value, "load", lineno=token.lineno
- )
- elif attr_token.type != "integer":
- self.fail("expected name or number", attr_token.lineno)
+ if attr_token.type == 'name':
+ return nodes.Getattr(node, attr_token.value, 'load',
+ lineno=token.lineno)
+ elif attr_token.type != 'integer':
+ self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
- return nodes.Getitem(node, arg, "load", lineno=token.lineno)
- if token.type == "lbracket":
+ return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
+ if token.type == 'lbracket':
args = []
- while self.stream.current.type != "rbracket":
+ while self.stream.current.type != 'rbracket':
if args:
- self.stream.expect("comma")
+ self.stream.expect('comma')
args.append(self.parse_subscribed())
- self.stream.expect("rbracket")
+ self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
- arg = nodes.Tuple(args, "load", lineno=token.lineno)
- return nodes.Getitem(node, arg, "load", lineno=token.lineno)
- self.fail("expected subscript expression", token.lineno)
+ arg = nodes.Tuple(args, 'load', lineno=token.lineno)
+ return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
+        self.fail('expected subscript expression', token.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
- if self.stream.current.type == "colon":
+ if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
- if self.stream.current.type != "colon":
+ if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
- if self.stream.current.type == "colon":
+ if self.stream.current.type == 'colon':
args.append(None)
- elif self.stream.current.type not in ("rbracket", "comma"):
+ elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
- if self.stream.current.type == "colon":
+ if self.stream.current.type == 'colon':
next(self.stream)
- if self.stream.current.type not in ("rbracket", "comma"):
+ if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
@@ -778,7 +751,7 @@ def parse_subscribed(self):
return nodes.Slice(lineno=lineno, *args)
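+    # Illustrative: inside 'foo[1:2]' this returns
+    # Slice(Const(1), Const(2), None); a plain index like 'foo[1]' never
+    # reaches the slice path and comes back as Const(1).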
def parse_call(self, node):
- token = self.stream.expect("lparen")
+ token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
@@ -786,100 +759,91 @@ def parse_call(self, node):
def ensure(expr):
if not expr:
- self.fail("invalid syntax for function call expression", token.lineno)
+ self.fail('invalid syntax for function call expression',
+ token.lineno)
- while self.stream.current.type != "rparen":
+ while self.stream.current.type != 'rparen':
if require_comma:
- self.stream.expect("comma")
+ self.stream.expect('comma')
# support for trailing comma
- if self.stream.current.type == "rparen":
+ if self.stream.current.type == 'rparen':
break
- if self.stream.current.type == "mul":
+ if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
- elif self.stream.current.type == "pow":
+ elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
- if (
- self.stream.current.type == "name"
- and self.stream.look().type == "assign"
- ):
- # Parsing a kwarg
- ensure(dyn_kwargs is None)
+ ensure(dyn_args is None and dyn_kwargs is None)
+ if self.stream.current.type == 'name' and \
+ self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
- kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
+ kwargs.append(nodes.Keyword(key, value,
+ lineno=value.lineno))
else:
- # Parsing an arg
- ensure(dyn_args is None and dyn_kwargs is None and not kwargs)
+ ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
- self.stream.expect("rparen")
+ self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
- return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno)
+ return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
+ lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
- while self.stream.current.type == "pipe" or start_inline:
+ while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
- token = self.stream.expect("name")
+ token = self.stream.expect('name')
name = token.value
- while self.stream.current.type == "dot":
+ while self.stream.current.type == 'dot':
next(self.stream)
- name += "." + self.stream.expect("name").value
- if self.stream.current.type == "lparen":
+ name += '.' + self.stream.expect('name').value
+ if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
- node = nodes.Filter(
- node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
- )
+ node = nodes.Filter(node, name, args, kwargs, dyn_args,
+ dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
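
parse_filter walks pipe chains, including dotted filter names; a minimal sketch of what it enables at the template level (a standard jinja2 install assumed):

from jinja2 import Environment

env = Environment()
# Each pipe segment below is consumed by parse_filter() and becomes a nodes.Filter.
print(env.from_string("{{ 'hello'|upper|replace('L', 'l') }}").render())  # HEllO
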
def parse_test(self, node):
token = next(self.stream)
- if self.stream.current.test("name:not"):
+ if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
- name = self.stream.expect("name").value
- while self.stream.current.type == "dot":
+ name = self.stream.expect('name').value
+ while self.stream.current.type == 'dot':
next(self.stream)
- name += "." + self.stream.expect("name").value
+ name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
- if self.stream.current.type == "lparen":
+ if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
- elif self.stream.current.type in (
- "name",
- "string",
- "integer",
- "float",
- "lparen",
- "lbracket",
- "lbrace",
- ) and not self.stream.current.test_any("name:else", "name:or", "name:and"):
- if self.stream.current.test("name:is"):
- self.fail("You cannot chain multiple tests with is")
- arg_node = self.parse_primary()
- arg_node = self.parse_postfix(arg_node)
- args = [arg_node]
+ elif (self.stream.current.type in ('name', 'string', 'integer',
+ 'float', 'lparen', 'lbracket',
+ 'lbrace') and not
+ self.stream.current.test_any('name:else', 'name:or',
+ 'name:and')):
+ if self.stream.current.test('name:is'):
+ self.fail('You cannot chain multiple tests with is')
+ args = [self.parse_primary()]
else:
args = []
- node = nodes.Test(
- node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
- )
+ node = nodes.Test(node, name, args, kwargs, dyn_args,
+ dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
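
parse_test also accepts a single parenthesis-free argument (the name/string/integer branch above); a minimal sketch, assuming a standard jinja2 install:

from jinja2 import Environment

env = Environment()
# 'divisibleby 3' takes the paren-less test-argument path in parse_test().
print(env.from_string("{{ 42 is divisibleby 3 }} {{ 42 is not none }}").render())  # True True
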
@@ -901,29 +865,29 @@ def flush_data():
try:
while self.stream:
token = self.stream.current
- if token.type == "data":
+ if token.type == 'data':
if token.value:
- add_data(nodes.TemplateData(token.value, lineno=token.lineno))
+ add_data(nodes.TemplateData(token.value,
+ lineno=token.lineno))
next(self.stream)
- elif token.type == "variable_begin":
+ elif token.type == 'variable_begin':
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
- self.stream.expect("variable_end")
- elif token.type == "block_begin":
+ self.stream.expect('variable_end')
+ elif token.type == 'block_begin':
flush_data()
next(self.stream)
- if end_tokens is not None and self.stream.current.test_any(
- *end_tokens
- ):
+ if end_tokens is not None and \
+ self.stream.current.test_any(*end_tokens):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
- self.stream.expect("block_end")
+ self.stream.expect('block_end')
else:
- raise AssertionError("internal parsing error")
+ raise AssertionError('internal parsing error')
flush_data()
finally:
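
The subparse loop above dispatches on data, variable_begin and block_begin tokens to build the template body; a minimal sketch of driving it through the public API (jinja2 assumed importable):

from jinja2 import Environment

env = Environment()
# env.parse() runs the lexer and this parser; the Template node's body
# alternates TemplateData (the 'data' branch) with statement nodes.
ast = env.parse("Hello {% if user %}{{ user }}{% endif %}!")
print(ast)
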
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/runtime.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/runtime.py
old mode 100644
new mode 100755
index 3ad79686..f9d7a680
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/runtime.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/runtime.py
@@ -1,62 +1,43 @@
# -*- coding: utf-8 -*-
-"""The runtime functions and state used by compiled templates."""
+"""
+ jinja2.runtime
+ ~~~~~~~~~~~~~~
+
+ Runtime helpers.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
+"""
import sys
+
from itertools import chain
from types import MethodType
-from markupsafe import escape # noqa: F401
-from markupsafe import Markup
-from markupsafe import soft_unicode
-
-from ._compat import abc
-from ._compat import imap
-from ._compat import implements_iterator
-from ._compat import implements_to_string
-from ._compat import iteritems
-from ._compat import PY2
-from ._compat import string_types
-from ._compat import text_type
-from ._compat import with_metaclass
-from .exceptions import TemplateNotFound # noqa: F401
-from .exceptions import TemplateRuntimeError # noqa: F401
-from .exceptions import UndefinedError
-from .nodes import EvalContext
-from .utils import concat
-from .utils import evalcontextfunction
-from .utils import internalcode
-from .utils import missing
-from .utils import Namespace # noqa: F401
-from .utils import object_type_repr
+from jinja2.nodes import EvalContext, _context_function_types
+from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
+ internalcode, object_type_repr, evalcontextfunction, Namespace
+from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
+ TemplateNotFound
+from jinja2._compat import imap, text_type, iteritems, \
+ implements_iterator, implements_to_string, string_types, PY2, \
+ with_metaclass
+
# these variables are exported to the template runtime
-exported = [
- "LoopContext",
- "TemplateReference",
- "Macro",
- "Markup",
- "TemplateRuntimeError",
- "missing",
- "concat",
- "escape",
- "markup_join",
- "unicode_join",
- "to_string",
- "identity",
- "TemplateNotFound",
- "Namespace",
- "Undefined",
-]
+__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
+ 'TemplateRuntimeError', 'missing', 'concat', 'escape',
+ 'markup_join', 'unicode_join', 'to_string', 'identity',
+ 'TemplateNotFound', 'Namespace']
#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type
+#: the identity function. Useful for certain things in the environment
+identity = lambda x: x
-def identity(x):
- """Returns its argument. Useful for certain things in the
- environment.
- """
- return x
+_first_iteration = object()
+_last_iteration = object()
def markup_join(seq):
@@ -65,8 +46,8 @@ def markup_join(seq):
iterator = imap(soft_unicode, seq)
for arg in iterator:
buf.append(arg)
- if hasattr(arg, "__html__"):
- return Markup(u"").join(chain(buf, iterator))
+ if hasattr(arg, '__html__'):
+ return Markup(u'').join(chain(buf, iterator))
return concat(buf)
@@ -75,16 +56,9 @@ def unicode_join(seq):
return concat(imap(text_type, seq))
-def new_context(
- environment,
- template_name,
- blocks,
- vars=None,
- shared=None,
- globals=None,
- locals=None,
-):
- """Internal helper for context creation."""
+def new_context(environment, template_name, blocks, vars=None,
+ shared=None, globals=None, locals=None):
+ """Internal helper to for context creation."""
if vars is None:
vars = {}
if shared:
@@ -99,7 +73,8 @@ def new_context(
for key, value in iteritems(locals):
if value is not missing:
parent[key] = value
- return environment.context_class(environment, parent, template_name, blocks)
+ return environment.context_class(environment, parent, template_name,
+ blocks)
class TemplateReference(object):
@@ -113,16 +88,20 @@ def __getitem__(self, name):
return BlockReference(name, self.__context, blocks, 0)
def __repr__(self):
- return "<%s %r>" % (self.__class__.__name__, self.__context.name)
+ return '<%s %r>' % (
+ self.__class__.__name__,
+ self.__context.name
+ )
def _get_func(x):
- return getattr(x, "__func__", x)
+ return getattr(x, '__func__', x)
class ContextMeta(type):
- def __new__(mcs, name, bases, d):
- rv = type.__new__(mcs, name, bases, d)
+
+ def __new__(cls, name, bases, d):
+ rv = type.__new__(cls, name, bases, d)
if bases == ():
return rv
@@ -133,15 +112,11 @@ def __new__(mcs, name, bases, d):
# If we have a changed resolve but no changed default or missing
# resolve we invert the call logic.
- if (
- resolve is not default_resolve
- and resolve_or_missing is default_resolve_or_missing
- ):
+ if resolve is not default_resolve and \
+ resolve_or_missing is default_resolve_or_missing:
rv._legacy_resolve_mode = True
- elif (
- resolve is default_resolve
- and resolve_or_missing is default_resolve_or_missing
- ):
+ elif resolve is default_resolve and \
+ resolve_or_missing is default_resolve_or_missing:
rv._fast_resolve_mode = True
return rv
@@ -174,7 +149,6 @@ class Context(with_metaclass(ContextMeta)):
method that doesn't fail with a `KeyError` but returns an
:class:`Undefined` object for missing variables.
"""
-
# XXX: we want to eventually make this be a deprecation warning and
# remove it.
_legacy_resolve_mode = False
@@ -205,9 +179,9 @@ def super(self, name, current):
index = blocks.index(current) + 1
blocks[index]
except LookupError:
- return self.environment.undefined(
- "there is no parent block called %r." % name, name="super"
- )
+ return self.environment.undefined('there is no parent block '
+ 'called %r.' % name,
+ name='super')
return BlockReference(name, self, blocks, index)
def get(self, key, default=None):
@@ -258,7 +232,7 @@ def get_all(self):
return dict(self.parent, **self.vars)
@internalcode
- def call(__self, __obj, *args, **kwargs): # noqa: B902
+ def call(__self, __obj, *args, **kwargs):
"""Call the callable with the arguments and keyword arguments
provided but inject the active context or environment as first
argument if the callable is a :func:`contextfunction` or
@@ -268,62 +242,55 @@ def call(__self, __obj, *args, **kwargs): # noqa: B902
__traceback_hide__ = True # noqa
# Allow callable classes to take a context
- if hasattr(__obj, "__call__"): # noqa: B004
+ if hasattr(__obj, '__call__'):
fn = __obj.__call__
- for fn_type in (
- "contextfunction",
- "evalcontextfunction",
- "environmentfunction",
- ):
+ for fn_type in ('contextfunction',
+ 'evalcontextfunction',
+ 'environmentfunction'):
if hasattr(fn, fn_type):
__obj = fn
break
- if callable(__obj):
- if getattr(__obj, "contextfunction", False) is True:
+ if isinstance(__obj, _context_function_types):
+ if getattr(__obj, 'contextfunction', 0):
args = (__self,) + args
- elif getattr(__obj, "evalcontextfunction", False) is True:
+ elif getattr(__obj, 'evalcontextfunction', 0):
args = (__self.eval_ctx,) + args
- elif getattr(__obj, "environmentfunction", False) is True:
+ elif getattr(__obj, 'environmentfunction', 0):
args = (__self.environment,) + args
try:
return __obj(*args, **kwargs)
except StopIteration:
- return __self.environment.undefined(
- "value was undefined because "
- "a callable raised a "
- "StopIteration exception"
- )
+ return __self.environment.undefined('value was undefined because '
+ 'a callable raised a '
+ 'StopIteration exception')
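
Context.call inspects the contextfunction/evalcontextfunction/environmentfunction flags and injects the matching first argument; a minimal sketch with the public decorator (the function and variable names here are illustrative):

from jinja2 import Environment, contextfunction

@contextfunction
def current_user(ctx):
    # Receives the render Context injected by Context.call.
    return ctx.get("user", "nobody")

env = Environment()
env.globals["current_user"] = current_user
print(env.from_string("{{ current_user() }}").render(user="alice"))  # alice
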
def derived(self, locals=None):
"""Internal helper function to create a derived context. This is
used in situations where the system needs a new context in the same
template that is independent.
"""
- context = new_context(
- self.environment, self.name, {}, self.get_all(), True, None, locals
- )
+ context = new_context(self.environment, self.name, {},
+ self.get_all(), True, None, locals)
context.eval_ctx = self.eval_ctx
context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
return context
- def _all(meth): # noqa: B902
- def proxy(self):
- return getattr(self.get_all(), meth)()
-
+ def _all(meth):
+ proxy = lambda self: getattr(self.get_all(), meth)()
proxy.__doc__ = getattr(dict, meth).__doc__
proxy.__name__ = meth
return proxy
- keys = _all("keys")
- values = _all("values")
- items = _all("items")
+ keys = _all('keys')
+ values = _all('values')
+ items = _all('items')
# not available on python 3
if PY2:
- iterkeys = _all("iterkeys")
- itervalues = _all("itervalues")
- iteritems = _all("iteritems")
+ iterkeys = _all('iterkeys')
+ itervalues = _all('itervalues')
+ iteritems = _all('iteritems')
del _all
def __contains__(self, name):
@@ -339,14 +306,19 @@ def __getitem__(self, key):
return item
def __repr__(self):
- return "<%s %s of %r>" % (
+ return '<%s %s of %r>' % (
self.__class__.__name__,
repr(self.get_all()),
- self.name,
+ self.name
)
-abc.Mapping.register(Context)
+# register the context as mapping if possible
+try:
+ from collections import Mapping
+ Mapping.register(Context)
+except ImportError:
+ pass
class BlockReference(object):
@@ -362,10 +334,11 @@ def __init__(self, name, context, stack, depth):
def super(self):
"""Super the block."""
if self._depth + 1 >= len(self._stack):
- return self._context.environment.undefined(
- "there is no parent block called %r." % self.name, name="super"
- )
- return BlockReference(self.name, self._context, self._stack, self._depth + 1)
+ return self._context.environment. \
+ undefined('there is no parent block called %r.' %
+ self.name, name='super')
+ return BlockReference(self.name, self._context, self._stack,
+ self._depth + 1)
@internalcode
def __call__(self):
@@ -375,212 +348,143 @@ def __call__(self):
return rv
-@implements_iterator
-class LoopContext:
- """A wrapper iterable for dynamic ``for`` loops, with information
- about the loop and iteration.
- """
-
- #: Current iteration of the loop, starting at 0.
- index0 = -1
+class LoopContextBase(object):
+ """A loop context for dynamic iteration."""
+ _before = _first_iteration
+ _current = _first_iteration
+ _after = _last_iteration
_length = None
- _after = missing
- _current = missing
- _before = missing
- _last_changed_value = missing
- def __init__(self, iterable, undefined, recurse=None, depth0=0):
- """
- :param iterable: Iterable to wrap.
- :param undefined: :class:`Undefined` class to use for next and
- previous items.
- :param recurse: The function to render the loop body when the
- loop is marked recursive.
- :param depth0: Incremented when looping recursively.
- """
- self._iterable = iterable
- self._iterator = self._to_iterator(iterable)
+ def __init__(self, undefined, recurse=None, depth0=0):
self._undefined = undefined
self._recurse = recurse
- #: How many levels deep a recursive loop currently is, starting at 0.
+ self.index0 = -1
self.depth0 = depth0
+ self._last_checked_value = missing
- @staticmethod
- def _to_iterator(iterable):
- return iter(iterable)
-
- @property
- def length(self):
- """Length of the iterable.
-
- If the iterable is a generator or otherwise does not have a
- size, it is eagerly evaluated to get a size.
- """
- if self._length is not None:
- return self._length
-
- try:
- self._length = len(self._iterable)
- except TypeError:
- iterable = list(self._iterator)
- self._iterator = self._to_iterator(iterable)
- self._length = len(iterable) + self.index + (self._after is not missing)
-
- return self._length
-
- def __len__(self):
- return self.length
-
- @property
- def depth(self):
- """How many levels deep a recursive loop currently is, starting at 1."""
- return self.depth0 + 1
-
- @property
- def index(self):
- """Current iteration of the loop, starting at 1."""
- return self.index0 + 1
-
- @property
- def revindex0(self):
- """Number of iterations from the end of the loop, ending at 0.
-
- Requires calculating :attr:`length`.
- """
- return self.length - self.index
-
- @property
- def revindex(self):
- """Number of iterations from the end of the loop, ending at 1.
-
- Requires calculating :attr:`length`.
- """
- return self.length - self.index0
-
- @property
- def first(self):
- """Whether this is the first iteration of the loop."""
- return self.index0 == 0
-
- def _peek_next(self):
- """Return the next element in the iterable, or :data:`missing`
- if the iterable is exhausted. Only peeks one item ahead, caching
- the result in :attr:`_last` for use in subsequent checks. The
- cache is reset when :meth:`__next__` is called.
- """
- if self._after is not missing:
- return self._after
-
- self._after = next(self._iterator, missing)
- return self._after
+ def cycle(self, *args):
+ """Cycles among the arguments with the current loop index."""
+ if not args:
+ raise TypeError('no items for cycling given')
+ return args[self.index0 % len(args)]
- @property
- def last(self):
- """Whether this is the last iteration of the loop.
+ def changed(self, *value):
+ """Checks whether the value has changed since the last call."""
+ if self._last_checked_value != value:
+ self._last_checked_value = value
+ return True
+ return False
- Causes the iterable to advance early. See
- :func:`itertools.groupby` for issues this can cause.
- The :func:`groupby` filter avoids that issue.
- """
- return self._peek_next() is missing
+ first = property(lambda x: x.index0 == 0)
+ last = property(lambda x: x._after is _last_iteration)
+ index = property(lambda x: x.index0 + 1)
+ revindex = property(lambda x: x.length - x.index0)
+ revindex0 = property(lambda x: x.length - x.index)
+ depth = property(lambda x: x.depth0 + 1)
@property
def previtem(self):
- """The item in the previous iteration. Undefined during the
- first iteration.
- """
- if self.first:
- return self._undefined("there is no previous item")
-
+ if self._before is _first_iteration:
+ return self._undefined('there is no previous item')
return self._before
@property
def nextitem(self):
- """The item in the next iteration. Undefined during the last
- iteration.
+ if self._after is _last_iteration:
+ return self._undefined('there is no next item')
+ return self._after
- Causes the iterable to advance early. See
- :func:`itertools.groupby` for issues this can cause.
- The :func:`groupby` filter avoids that issue.
- """
- rv = self._peek_next()
+ def __len__(self):
+ return self.length
- if rv is missing:
- return self._undefined("there is no next item")
+ @internalcode
+ def loop(self, iterable):
+ if self._recurse is None:
+ raise TypeError('Tried to call non recursive loop. Maybe you '
+ "forgot the 'recursive' modifier.")
+ return self._recurse(iterable, self._recurse, self.depth0 + 1)
- return rv
+ # a nifty trick to enhance the error message if someone tried to call
+ # the loop without or with too many arguments.
+ __call__ = loop
+ del loop
- def cycle(self, *args):
- """Return a value from the given args, cycling through based on
- the current :attr:`index0`.
+ def __repr__(self):
+ return '<%s %r/%r>' % (
+ self.__class__.__name__,
+ self.index,
+ self.length
+ )
- :param args: One or more values to cycle through.
- """
- if not args:
- raise TypeError("no items for cycling given")
- return args[self.index0 % len(args)]
+class LoopContext(LoopContextBase):
- def changed(self, *value):
- """Return ``True`` if previously called with a different value
- (including when called for the first time).
+ def __init__(self, iterable, undefined, recurse=None, depth0=0):
+ LoopContextBase.__init__(self, undefined, recurse, depth0)
+ self._iterator = iter(iterable)
- :param value: One or more values to compare to the last call.
- """
- if self._last_changed_value != value:
- self._last_changed_value = value
- return True
+ # try to get the length of the iterable early. This must be done
+ # here because there are some broken iterators around whose
+ # __len__ is the number of iterations left (I'm looking at you,
+ # listreverseiterator!).
+ try:
+ self._length = len(iterable)
+ except (TypeError, AttributeError):
+ self._length = None
+ self._after = self._safe_next()
- return False
+ @property
+ def length(self):
+ if self._length is None:
+ # if it was not possible to get the length of the iterator when
+ # the loop context was created (i.e. iterating over a generator)
+ # we have to convert the iterable into a sequence and use the
+ # length of that + the number of iterations so far.
+ iterable = tuple(self._iterator)
+ self._iterator = iter(iterable)
+ iterations_done = self.index0 + 2
+ self._length = len(iterable) + iterations_done
+ return self._length
def __iter__(self):
- return self
+ return LoopContextIterator(self)
- def __next__(self):
- if self._after is not missing:
- rv = self._after
- self._after = missing
- else:
- rv = next(self._iterator)
+ def _safe_next(self):
+ try:
+ return next(self._iterator)
+ except StopIteration:
+ return _last_iteration
- self.index0 += 1
- self._before = self._current
- self._current = rv
- return rv, self
- @internalcode
- def __call__(self, iterable):
- """When iterating over nested data, render the body of the loop
- recursively with the given inner iterable data.
+@implements_iterator
+class LoopContextIterator(object):
+ """The iterator for a loop context."""
+ __slots__ = ('context',)
- The loop must have the ``recursive`` marker for this to work.
- """
- if self._recurse is None:
- raise TypeError(
- "The loop must have the 'recursive' marker to be called recursively."
- )
+ def __init__(self, context):
+ self.context = context
- return self._recurse(iterable, self._recurse, depth=self.depth)
+ def __iter__(self):
+ return self
- def __repr__(self):
- return "<%s %d/%d>" % (self.__class__.__name__, self.index, self.length)
+ def __next__(self):
+ ctx = self.context
+ ctx.index0 += 1
+ if ctx._after is _last_iteration:
+ raise StopIteration()
+ ctx._before = ctx._current
+ ctx._current = ctx._after
+ ctx._after = ctx._safe_next()
+ return ctx._current, ctx
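
Both implementations above (the single-class LoopContext being removed and the LoopContextBase/LoopContextIterator pair being restored) feed the same loop variables to templates; a minimal sketch:

from jinja2 import Environment

tmpl = Environment().from_string(
    "{% for item in items %}{{ loop.index }}/{{ loop.length }}:{{ item }} {% endfor %}"
)
print(tmpl.render(items=["a", "b", "c"]))  # 1/3:a 2/3:b 3/3:c
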
class Macro(object):
"""Wraps a macro function."""
- def __init__(
- self,
- environment,
- func,
- name,
- arguments,
- catch_kwargs,
- catch_varargs,
- caller,
- default_autoescape=None,
- ):
+ def __init__(self, environment, func, name, arguments,
+ catch_kwargs, catch_varargs, caller,
+ default_autoescape=None):
self._environment = environment
self._func = func
self._argument_count = len(arguments)
@@ -589,7 +493,7 @@ def __init__(
self.catch_kwargs = catch_kwargs
self.catch_varargs = catch_varargs
self.caller = caller
- self.explicit_caller = "caller" in arguments
+ self.explicit_caller = 'caller' in arguments
if default_autoescape is None:
default_autoescape = environment.autoescape
self._default_autoescape = default_autoescape
@@ -601,8 +505,9 @@ def __call__(self, *args, **kwargs):
# decide largely based on compile-time information if a macro is
# safe or unsafe. While there was a volatile mode it was largely
# unused for deciding on escaping. This turns out to be
- # problematic for macros because whether a macro is safe depends not
- # on the escape mode when it was defined, but rather when it was used.
+ # problematic for macros because whether a macro is safe does not
+ # so much depend on the escape mode when it was defined as on when
+ # it was used.
#
# Because however we export macros from the module system and
# there are historic callers that do not pass an eval context (and
@@ -610,7 +515,7 @@ def __call__(self, *args, **kwargs):
# check here.
#
# This is considered safe because an eval context is not a valid
- # argument to callables otherwise anyway. Worst case here is
+ # argument to callables otherwise anyway. Worst case here is
# that if no eval context is passed we fall back to the compile
# time autoescape flag.
if args and isinstance(args[0], EvalContext):
@@ -620,7 +525,7 @@ def __call__(self, *args, **kwargs):
autoescape = self._default_autoescape
# try to consume the positional arguments
- arguments = list(args[: self._argument_count])
+ arguments = list(args[:self._argument_count])
off = len(arguments)
# For information why this is necessary refer to the handling
@@ -631,12 +536,12 @@ def __call__(self, *args, **kwargs):
# arguments expected we start filling in keyword arguments
# and defaults.
if off != self._argument_count:
- for name in self.arguments[len(arguments) :]:
+ for idx, name in enumerate(self.arguments[len(arguments):]):
try:
value = kwargs.pop(name)
except KeyError:
value = missing
- if name == "caller":
+ if name == 'caller':
found_caller = True
arguments.append(value)
else:
@@ -646,31 +551,26 @@ def __call__(self, *args, **kwargs):
# if not also changed in the compiler's `function_scoping` method.
# the order is caller, keyword arguments, positional arguments!
if self.caller and not found_caller:
- caller = kwargs.pop("caller", None)
+ caller = kwargs.pop('caller', None)
if caller is None:
- caller = self._environment.undefined("No caller defined", name="caller")
+ caller = self._environment.undefined('No caller defined',
+ name='caller')
arguments.append(caller)
if self.catch_kwargs:
arguments.append(kwargs)
elif kwargs:
- if "caller" in kwargs:
- raise TypeError(
- "macro %r was invoked with two values for "
- "the special caller argument. This is "
- "most likely a bug." % self.name
- )
- raise TypeError(
- "macro %r takes no keyword argument %r"
- % (self.name, next(iter(kwargs)))
- )
+ if 'caller' in kwargs:
+ raise TypeError('macro %r was invoked with two values for '
+ 'the special caller argument. This is '
+ 'most likely a bug.' % self.name)
+ raise TypeError('macro %r takes no keyword argument %r' %
+ (self.name, next(iter(kwargs))))
if self.catch_varargs:
- arguments.append(args[self._argument_count :])
+ arguments.append(args[self._argument_count:])
elif len(args) > self._argument_count:
- raise TypeError(
- "macro %r takes not more than %d argument(s)"
- % (self.name, len(self.arguments))
- )
+ raise TypeError('macro %r takes not more than %d argument(s)' %
+ (self.name, len(self.arguments)))
return self._invoke(arguments, autoescape)
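
Macro.__call__ consumes positionals first, then fills the remaining parameter names from keywords and defaults; a minimal template-level sketch:

from jinja2 import Environment

tmpl = Environment().from_string(
    "{% macro greet(name, punct='!') %}Hello {{ name }}{{ punct }}{% endmacro %}"
    "{{ greet('world') }} {{ greet('you', punct='?') }}"
)
print(tmpl.render())  # Hello world! Hello you?
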
@@ -682,16 +582,16 @@ def _invoke(self, arguments, autoescape):
return rv
def __repr__(self):
- return "<%s %s>" % (
+ return '<%s %s>' % (
self.__class__.__name__,
- self.name is None and "anonymous" or repr(self.name),
+ self.name is None and 'anonymous' or repr(self.name)
)
@implements_to_string
class Undefined(object):
"""The default undefined type. This undefined type can be printed and
- iterated over, but every other access will raise an :exc:`UndefinedError`:
+ iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
@@ -703,13 +603,8 @@ class Undefined(object):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
-
- __slots__ = (
- "_undefined_hint",
- "_undefined_obj",
- "_undefined_name",
- "_undefined_exception",
- )
+ __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
+ '_undefined_exception')
def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
self._undefined_hint = hint
@@ -717,86 +612,40 @@ def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
self._undefined_name = name
self._undefined_exception = exc
- @property
- def _undefined_message(self):
- """Build a message about the undefined value based on how it was
- accessed.
- """
- if self._undefined_hint:
- return self._undefined_hint
-
- if self._undefined_obj is missing:
- return "%r is undefined" % self._undefined_name
-
- if not isinstance(self._undefined_name, string_types):
- return "%s has no element %r" % (
- object_type_repr(self._undefined_obj),
- self._undefined_name,
- )
-
- return "%r has no attribute %r" % (
- object_type_repr(self._undefined_obj),
- self._undefined_name,
- )
-
@internalcode
def _fail_with_undefined_error(self, *args, **kwargs):
- """Raise an :exc:`UndefinedError` when operations are performed
- on the undefined value.
+ """Regular callback function for undefined objects that raises an
+ `jinja2.exceptions.UndefinedError` on call.
"""
- raise self._undefined_exception(self._undefined_message)
+ if self._undefined_hint is None:
+ if self._undefined_obj is missing:
+ hint = '%r is undefined' % self._undefined_name
+ elif not isinstance(self._undefined_name, string_types):
+ hint = '%s has no element %r' % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name
+ )
+ else:
+ hint = '%r has no attribute %r' % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name
+ )
+ else:
+ hint = self._undefined_hint
+ raise self._undefined_exception(hint)
@internalcode
def __getattr__(self, name):
- if name[:2] == "__":
+ if name[:2] == '__':
raise AttributeError(name)
return self._fail_with_undefined_error()
- __add__ = (
- __radd__
- ) = (
- __mul__
- ) = (
- __rmul__
- ) = (
- __div__
- ) = (
- __rdiv__
- ) = (
- __truediv__
- ) = (
- __rtruediv__
- ) = (
- __floordiv__
- ) = (
- __rfloordiv__
- ) = (
- __mod__
- ) = (
- __rmod__
- ) = (
- __pos__
- ) = (
- __neg__
- ) = (
- __call__
- ) = (
- __getitem__
- ) = (
- __lt__
- ) = (
- __le__
- ) = (
- __gt__
- ) = (
- __ge__
- ) = (
- __int__
- ) = (
- __float__
- ) = (
- __complex__
- ) = __pow__ = __rpow__ = __sub__ = __rsub__ = _fail_with_undefined_error
+ __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
+ __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
+ __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
+ __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
+ __float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \
+ __rsub__ = _fail_with_undefined_error
def __eq__(self, other):
return type(self) is type(other)
@@ -808,7 +657,7 @@ def __hash__(self):
return id(type(self))
def __str__(self):
- return u""
+ return u''
def __len__(self):
return 0
@@ -819,11 +668,10 @@ def __iter__(self):
def __nonzero__(self):
return False
-
__bool__ = __nonzero__
def __repr__(self):
- return "Undefined"
+ return 'Undefined'
def make_logging_undefined(logger=None, base=None):
@@ -848,7 +696,6 @@ def make_logging_undefined(logger=None, base=None):
"""
if logger is None:
import logging
-
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stderr))
if base is None:
@@ -857,27 +704,26 @@ def make_logging_undefined(logger=None, base=None):
def _log_message(undef):
if undef._undefined_hint is None:
if undef._undefined_obj is missing:
- hint = "%s is undefined" % undef._undefined_name
+ hint = '%s is undefined' % undef._undefined_name
elif not isinstance(undef._undefined_name, string_types):
- hint = "%s has no element %s" % (
+ hint = '%s has no element %s' % (
object_type_repr(undef._undefined_obj),
- undef._undefined_name,
- )
+ undef._undefined_name)
else:
- hint = "%s has no attribute %s" % (
+ hint = '%s has no attribute %s' % (
object_type_repr(undef._undefined_obj),
- undef._undefined_name,
- )
+ undef._undefined_name)
else:
hint = undef._undefined_hint
- logger.warning("Template variable warning: %s", hint)
+ logger.warning('Template variable warning: %s', hint)
class LoggingUndefined(base):
+
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return base._fail_with_undefined_error(self, *args, **kwargs)
except self._undefined_exception as e:
- logger.error("Template variable error: %s", str(e))
+ logger.error('Template variable error: %s', str(e))
raise e
def __str__(self):
@@ -891,7 +737,6 @@ def __iter__(self):
return rv
if PY2:
-
def __nonzero__(self):
rv = base.__nonzero__(self)
_log_message(self)
@@ -901,9 +746,7 @@ def __unicode__(self):
rv = base.__unicode__(self)
_log_message(self)
return rv
-
else:
-
def __bool__(self):
rv = base.__bool__(self)
_log_message(self)
@@ -912,36 +755,6 @@ def __bool__(self):
return LoggingUndefined
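
make_logging_undefined wraps an Undefined class so that every stringification or boolean test of a missing variable is logged; a minimal sketch (the logger name is illustrative):

import logging
from jinja2 import Environment
from jinja2.runtime import make_logging_undefined

logging.basicConfig(level=logging.WARNING)
LoggingUndefined = make_logging_undefined(logger=logging.getLogger("tmpl"))
env = Environment(undefined=LoggingUndefined)
print(env.from_string("{{ missing }}").render())
# Renders '' but logs: Template variable warning: missing is undefined
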
-# No @implements_to_string decorator here because __str__
-# is not overwritten from Undefined in this class.
-# This would cause a recursion error in Python 2.
-class ChainableUndefined(Undefined):
- """An undefined that is chainable, where both ``__getattr__`` and
- ``__getitem__`` return itself rather than raising an
- :exc:`UndefinedError`.
-
- >>> foo = ChainableUndefined(name='foo')
- >>> str(foo.bar['baz'])
- ''
- >>> foo.bar['baz'] + 42
- Traceback (most recent call last):
- ...
- jinja2.exceptions.UndefinedError: 'foo' is undefined
-
- .. versionadded:: 2.11.0
- """
-
- __slots__ = ()
-
- def __html__(self):
- return self.__str__()
-
- def __getattr__(self, _):
- return self
-
- __getitem__ = __getattr__
-
-
@implements_to_string
class DebugUndefined(Undefined):
"""An undefined that returns the debug info when printed.
@@ -956,18 +769,17 @@ class DebugUndefined(Undefined):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
-
__slots__ = ()
def __str__(self):
if self._undefined_hint is None:
if self._undefined_obj is missing:
- return u"{{ %s }}" % self._undefined_name
- return "{{ no such element: %s[%r] }}" % (
+ return u'{{ %s }}' % self._undefined_name
+ return '{{ no such element: %s[%r] }}' % (
object_type_repr(self._undefined_obj),
- self._undefined_name,
+ self._undefined_name
)
- return u"{{ undefined value printed: %s }}" % self._undefined_hint
+ return u'{{ undefined value printed: %s }}' % self._undefined_hint
@implements_to_string
@@ -990,22 +802,12 @@ class StrictUndefined(Undefined):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
-
__slots__ = ()
- __iter__ = (
- __str__
- ) = (
- __len__
- ) = (
- __nonzero__
- ) = __eq__ = __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error
+ __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
+ __ne__ = __bool__ = __hash__ = \
+ Undefined._fail_with_undefined_error
# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
-del (
- Undefined.__slots__,
- ChainableUndefined.__slots__,
- DebugUndefined.__slots__,
- StrictUndefined.__slots__,
-)
+del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
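
The undefined types in this file differ mainly in how loudly they fail; a minimal side-by-side sketch:

from jinja2 import Environment, DebugUndefined, StrictUndefined
from jinja2.exceptions import UndefinedError

print(Environment().from_string("[{{ missing }}]").render())  # '[]' -- default Undefined prints ''
print(Environment(undefined=DebugUndefined).from_string("[{{ missing }}]").render())  # '[{{ missing }}]'
try:
    Environment(undefined=StrictUndefined).from_string("[{{ missing }}]").render()
except UndefinedError as e:
    print(e)  # 'missing' is undefined
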
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/sandbox.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/sandbox.py
old mode 100644
new mode 100755
index cfd7993a..752e8128
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/sandbox.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/sandbox.py
@@ -1,66 +1,71 @@
# -*- coding: utf-8 -*-
-"""A sandbox layer that ensures unsafe operations cannot be performed.
-Useful when the template itself comes from an untrusted source.
"""
-import operator
+ jinja2.sandbox
+ ~~~~~~~~~~~~~~
+
+ Adds a sandbox layer to Jinja as it was the default behavior in the old
+ Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
+ default behavior is easier to use.
+
+ The behavior can be changed by subclassing the environment.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
+"""
import types
-import warnings
-from collections import deque
-from string import Formatter
+import operator
+from collections import Mapping
+from jinja2.environment import Environment
+from jinja2.exceptions import SecurityError
+from jinja2._compat import string_types, PY2
+from jinja2.utils import Markup
from markupsafe import EscapeFormatter
-from markupsafe import Markup
+from string import Formatter
-from ._compat import abc
-from ._compat import PY2
-from ._compat import range_type
-from ._compat import string_types
-from .environment import Environment
-from .exceptions import SecurityError
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
if PY2:
- UNSAFE_FUNCTION_ATTRIBUTES = {
- "func_closure",
- "func_code",
- "func_dict",
- "func_defaults",
- "func_globals",
- }
+ UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
+ 'func_defaults', 'func_globals'])
else:
# On versions > python 2 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
UNSAFE_FUNCTION_ATTRIBUTES = set()
+
#: unsafe method attributes. function attributes are unsafe for methods too
-UNSAFE_METHOD_ATTRIBUTES = {"im_class", "im_func", "im_self"}
+UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
-#: unsafe generator attributes.
-UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
+#: unsafe generator attributes.
+UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
#: unsafe attributes on coroutines
-UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
+UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code'])
#: unsafe attributes on async generators
-UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
+UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame'])
+
+import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
-warnings.filterwarnings(
- "ignore", "the sets module", DeprecationWarning, module=__name__
-)
+warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
+ module='jinja2.sandbox')
+
+from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
+
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
-
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_set_types += (UserList,)
except ImportError:
@@ -69,60 +74,36 @@
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
-
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
-_mutable_set_types += (abc.MutableSet,)
-_mutable_mapping_types += (abc.MutableMapping,)
-_mutable_sequence_types += (abc.MutableSequence,)
+from collections import MutableSet, MutableMapping, MutableSequence
+_mutable_set_types += (MutableSet,)
+_mutable_mapping_types += (MutableMapping,)
+_mutable_sequence_types += (MutableSequence,)
+
_mutable_spec = (
- (
- _mutable_set_types,
- frozenset(
- [
- "add",
- "clear",
- "difference_update",
- "discard",
- "pop",
- "remove",
- "symmetric_difference_update",
- "update",
- ]
- ),
- ),
- (
- _mutable_mapping_types,
- frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
- ),
- (
- _mutable_sequence_types,
- frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
- ),
- (
- deque,
- frozenset(
- [
- "append",
- "appendleft",
- "clear",
- "extend",
- "extendleft",
- "pop",
- "popleft",
- "remove",
- "rotate",
- ]
- ),
- ),
+ (_mutable_set_types, frozenset([
+ 'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
+ 'symmetric_difference_update', 'update'
+ ])),
+ (_mutable_mapping_types, frozenset([
+ 'clear', 'pop', 'popitem', 'setdefault', 'update'
+ ])),
+ (_mutable_sequence_types, frozenset([
+ 'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
+ ])),
+ (deque, frozenset([
+ 'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
+ 'popleft', 'remove', 'rotate'
+ ]))
)
-class _MagicFormatMapping(abc.Mapping):
+class _MagicFormatMapping(Mapping):
"""This class implements a dummy wrapper to fix a bug in the Python
standard library for string formatting.
@@ -136,7 +117,7 @@ def __init__(self, args, kwargs):
self._last_index = 0
def __getitem__(self, key):
- if key == "":
+ if key == '':
idx = self._last_index
self._last_index += 1
try:
@@ -154,9 +135,9 @@ def __len__(self):
def inspect_format_method(callable):
- if not isinstance(
- callable, (types.MethodType, types.BuiltinMethodType)
- ) or callable.__name__ not in ("format", "format_map"):
+ if not isinstance(callable, (types.MethodType,
+ types.BuiltinMethodType)) or \
+ callable.__name__ not in ('format', 'format_map'):
return None
obj = callable.__self__
if isinstance(obj, string_types):
@@ -167,14 +148,10 @@ def safe_range(*args):
"""A range that can't generate ranges with a length of more than
MAX_RANGE items.
"""
- rng = range_type(*args)
-
+ rng = range(*args)
if len(rng) > MAX_RANGE:
- raise OverflowError(
- "Range too big. The sandbox blocks ranges larger than"
- " MAX_RANGE (%d)." % MAX_RANGE
- )
-
+ raise OverflowError('range too big, maximum size for range is %d' %
+ MAX_RANGE)
return rng
@@ -207,25 +184,24 @@ def is_internal_attribute(obj, attr):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, types.MethodType):
- if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
+ attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
- if attr == "mro":
+ if attr == 'mro':
return True
elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
return True
elif isinstance(obj, types.GeneratorType):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True
- elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
+ elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType):
if attr in UNSAFE_COROUTINE_ATTRIBUTES:
return True
- elif hasattr(types, "AsyncGeneratorType") and isinstance(
- obj, types.AsyncGeneratorType
- ):
+ elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType):
if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
return True
- return attr.startswith("__")
+ return attr.startswith('__')
def modifies_known_mutable(obj, attr):
@@ -266,26 +242,28 @@ class SandboxedEnvironment(Environment):
raised. However also other exceptions may occur during the rendering so
the caller has to ensure that all exceptions are caught.
"""
-
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table = {
- "+": operator.add,
- "-": operator.sub,
- "*": operator.mul,
- "/": operator.truediv,
- "//": operator.floordiv,
- "**": operator.pow,
- "%": operator.mod,
+ '+': operator.add,
+ '-': operator.sub,
+ '*': operator.mul,
+ '/': operator.truediv,
+ '//': operator.floordiv,
+ '**': operator.pow,
+ '%': operator.mod
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
- default_unop_table = {"+": operator.pos, "-": operator.neg}
+ default_unop_table = {
+ '+': operator.pos,
+ '-': operator.neg
+ }
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
@@ -321,7 +299,7 @@ class SandboxedEnvironment(Environment):
def intercept_unop(self, operator):
"""Called during template compilation with the name of a unary
operator to check if it should be intercepted at runtime. If this
- method returns `True`, :meth:`call_unop` is executed for this unary
+ method returns `True`, :meth:`call_unop` is executed for this unary
operator. The default implementation of :meth:`call_unop` will use
the :attr:`unop_table` dictionary to perform the operator with the
same logic as the builtin one.
@@ -335,9 +313,10 @@ def intercept_unop(self, operator):
"""
return False
+
def __init__(self, *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
- self.globals["range"] = safe_range
+ self.globals['range'] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
@@ -348,7 +327,7 @@ def is_safe_attribute(self, obj, attr, value):
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
- return not (attr.startswith("_") or is_internal_attribute(obj, attr))
+ return not (attr.startswith('_') or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj):
"""Check if an object is safely callable. Per default a function is
@@ -356,9 +335,8 @@ def is_safe_callable(self, obj):
True. Override this method to alter the behavior, but this won't
affect the `unsafe` decorator from this module.
"""
- return not (
- getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
- )
+ return not (getattr(obj, 'unsafe_callable', False) or
+ getattr(obj, 'alters_data', False))
def call_binop(self, context, operator, left, right):
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
@@ -418,13 +396,11 @@ def getattr(self, obj, attribute):
def unsafe_undefined(self, obj, attribute):
"""Return an undefined object for unsafe attributes."""
- return self.undefined(
- "access to attribute %r of %r "
- "object is unsafe." % (attribute, obj.__class__.__name__),
- name=attribute,
- obj=obj,
- exc=SecurityError,
- )
+ return self.undefined('access to attribute %r of %r '
+ 'object is unsafe.' % (
+ attribute,
+ obj.__class__.__name__
+ ), name=attribute, obj=obj, exc=SecurityError)
def format_string(self, s, args, kwargs, format_func=None):
"""If a format call is detected, then this is routed through this
@@ -435,10 +411,10 @@ def format_string(self, s, args, kwargs, format_func=None):
else:
formatter = SandboxedFormatter(self)
- if format_func is not None and format_func.__name__ == "format_map":
+ if format_func is not None and format_func.__name__ == 'format_map':
if len(args) != 1 or kwargs:
raise TypeError(
- "format_map() takes exactly one argument %d given"
+ 'format_map() takes exactly one argument %d given'
% (len(args) + (kwargs is not None))
)
@@ -449,7 +425,7 @@ def format_string(self, s, args, kwargs, format_func=None):
rv = formatter.vformat(s, args, kwargs)
return type(s)(rv)
- def call(__self, __context, __obj, *args, **kwargs): # noqa: B902
+ def call(__self, __context, __obj, *args, **kwargs):
"""Call an object from sandboxed code."""
fmt = inspect_format_method(__obj)
if fmt is not None:
@@ -458,7 +434,7 @@ def call(__self, __context, __obj, *args, **kwargs): # noqa: B902
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
- raise SecurityError("%r is not safely callable" % (__obj,))
+ raise SecurityError('%r is not safely callable' % (__obj,))
return __context.call(__obj, *args, **kwargs)
@@ -474,16 +450,16 @@ def is_safe_attribute(self, obj, attr, value):
return not modifies_known_mutable(obj, attr)
-# This really is not a public API apparently.
+# This really is not a public API apparently.
try:
from _string import formatter_field_name_split
except ImportError:
-
def formatter_field_name_split(field_name):
return field_name._formatter_field_name_split()
class SandboxedFormatterMixin(object):
+
def __init__(self, env):
self._env = env
@@ -497,14 +473,14 @@ def get_field(self, field_name, args, kwargs):
obj = self._env.getitem(obj, i)
return obj, first
-
class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
+
def __init__(self, env):
SandboxedFormatterMixin.__init__(self, env)
Formatter.__init__(self)
-
class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
+
def __init__(self, env, escape):
SandboxedFormatterMixin.__init__(self, env)
EscapeFormatter.__init__(self, escape)
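
safe_range above is what backs range() inside sandboxed templates; a minimal sketch of the guard (the exact OverflowError message differs between the two versions in this hunk):

from jinja2.sandbox import SandboxedEnvironment

env = SandboxedEnvironment()
print(env.from_string("{{ range(3) | list }}").render())  # [0, 1, 2]
try:
    env.from_string("{{ range(10 ** 9) }}").render()
except OverflowError as e:
    print(e)  # the sandbox refuses ranges longer than MAX_RANGE (100000)
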
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/tests.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/tests.py
old mode 100644
new mode 100755
index fabd4ce5..0adc3d4d
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/tests.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/tests.py
@@ -1,17 +1,24 @@
# -*- coding: utf-8 -*-
-"""Built-in template tests used with the ``is`` operator."""
-import decimal
+"""
+ jinja2.tests
+ ~~~~~~~~~~~~
+
+ Jinja test functions. Used with the "is" operator.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
import operator
import re
+from collections import Mapping
+from jinja2.runtime import Undefined
+from jinja2._compat import text_type, string_types, integer_types
+import decimal
-from ._compat import abc
-from ._compat import integer_types
-from ._compat import string_types
-from ._compat import text_type
-from .runtime import Undefined
-
-number_re = re.compile(r"^-?\d+(\.\d+)?$")
+number_re = re.compile(r'^-?\d+(\.\d+)?$')
regex_type = type(number_re)
+
+
test_callable = callable
@@ -57,48 +64,6 @@ def test_none(value):
return value is None
-def test_boolean(value):
- """Return true if the object is a boolean value.
-
- .. versionadded:: 2.11
- """
- return value is True or value is False
-
-
-def test_false(value):
- """Return true if the object is False.
-
- .. versionadded:: 2.11
- """
- return value is False
-
-
-def test_true(value):
- """Return true if the object is True.
-
- .. versionadded:: 2.11
- """
- return value is True
-
-
-# NOTE: The existing 'number' test matches booleans and floats
-def test_integer(value):
- """Return true if the object is an integer.
-
- .. versionadded:: 2.11
- """
- return isinstance(value, integer_types) and value is not True and value is not False
-
-
-# NOTE: The existing 'number' test matches booleans and integers
-def test_float(value):
- """Return true if the object is a float.
-
- .. versionadded:: 2.11
- """
- return isinstance(value, float)
-
-
def test_lower(value):
"""Return true if the variable is lowercased."""
return text_type(value).islower()
@@ -119,7 +84,7 @@ def test_mapping(value):
.. versionadded:: 2.6
"""
- return isinstance(value, abc.Mapping)
+ return isinstance(value, Mapping)
def test_number(value):
@@ -134,7 +99,7 @@ def test_sequence(value):
try:
len(value)
value.__getitem__
- except Exception:
+ except:
return False
return True
@@ -163,7 +128,7 @@ def test_iterable(value):
def test_escaped(value):
"""Check if the value is escaped."""
- return hasattr(value, "__html__")
+ return hasattr(value, '__html__')
def test_in(value, seq):
@@ -175,41 +140,36 @@ def test_in(value, seq):
TESTS = {
- "odd": test_odd,
- "even": test_even,
- "divisibleby": test_divisibleby,
- "defined": test_defined,
- "undefined": test_undefined,
- "none": test_none,
- "boolean": test_boolean,
- "false": test_false,
- "true": test_true,
- "integer": test_integer,
- "float": test_float,
- "lower": test_lower,
- "upper": test_upper,
- "string": test_string,
- "mapping": test_mapping,
- "number": test_number,
- "sequence": test_sequence,
- "iterable": test_iterable,
- "callable": test_callable,
- "sameas": test_sameas,
- "escaped": test_escaped,
- "in": test_in,
- "==": operator.eq,
- "eq": operator.eq,
- "equalto": operator.eq,
- "!=": operator.ne,
- "ne": operator.ne,
- ">": operator.gt,
- "gt": operator.gt,
- "greaterthan": operator.gt,
- "ge": operator.ge,
- ">=": operator.ge,
- "<": operator.lt,
- "lt": operator.lt,
- "lessthan": operator.lt,
- "<=": operator.le,
- "le": operator.le,
+ 'odd': test_odd,
+ 'even': test_even,
+ 'divisibleby': test_divisibleby,
+ 'defined': test_defined,
+ 'undefined': test_undefined,
+ 'none': test_none,
+ 'lower': test_lower,
+ 'upper': test_upper,
+ 'string': test_string,
+ 'mapping': test_mapping,
+ 'number': test_number,
+ 'sequence': test_sequence,
+ 'iterable': test_iterable,
+ 'callable': test_callable,
+ 'sameas': test_sameas,
+ 'escaped': test_escaped,
+ 'in': test_in,
+ '==': operator.eq,
+ 'eq': operator.eq,
+ 'equalto': operator.eq,
+ '!=': operator.ne,
+ 'ne': operator.ne,
+ '>': operator.gt,
+ 'gt': operator.gt,
+ 'greaterthan': operator.gt,
+ 'ge': operator.ge,
+ '>=': operator.ge,
+ '<': operator.lt,
+ 'lt': operator.lt,
+ 'lessthan': operator.lt,
+ '<=': operator.le,
+ 'le': operator.le,
}
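
The TESTS table maps template-side test names, including operator aliases like 'eq'/'==', onto plain predicates; a minimal sketch:

from jinja2 import Environment

env = Environment()
print(env.from_string("{{ 3 is odd }} {{ 'abc' is lower }} {{ 4 is eq 4 }}").render())
# True True True
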
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/utils.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/utils.py
old mode 100644
new mode 100755
index 6afca810..502a311c
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/utils.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/utils.py
@@ -1,32 +1,44 @@
# -*- coding: utf-8 -*-
-import json
-import os
+"""
+ jinja2.utils
+ ~~~~~~~~~~~~
+
+ Utility functions.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
import re
-import warnings
+import json
+import errno
from collections import deque
-from random import choice
-from random import randrange
-from string import ascii_letters as _letters
-from string import digits as _digits
from threading import Lock
+from jinja2._compat import text_type, string_types, implements_iterator, \
+ url_quote
-from markupsafe import escape
-from markupsafe import Markup
-from ._compat import abc
-from ._compat import string_types
-from ._compat import text_type
-from ._compat import url_quote
+_word_split_re = re.compile(r'(\s+)')
+_punctuation_re = re.compile(
+ '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
+ '|'.join(map(re.escape, ('(', '<', '&lt;'))),
+ '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
+ )
+)
+_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
+_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
+_entity_re = re.compile(r'&([^;]+);')
+_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+_digits = '0123456789'
# special singleton representing missing values for the runtime
-missing = type("MissingType", (), {"__repr__": lambda x: "missing"})()
+missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
# internal code
internal_code = set()
-concat = u"".join
+concat = u''.join
-_slash_escape = "\\/" not in json.dumps("/")
+_slash_escape = '\\/' not in json.dumps('/')
def contextfunction(f):
@@ -86,26 +98,24 @@ def default(var, default=''):
return default
return var
"""
- from .runtime import Undefined
-
+ from jinja2.runtime import Undefined
return isinstance(obj, Undefined)
def consume(iterable):
"""Consumes an iterable without doing anything with it."""
- for _ in iterable:
+ for event in iterable:
pass
def clear_caches():
- """Jinja keeps internal caches for environments and lexers. These are
- used so that Jinja doesn't have to recreate environments and lexers all
+ """Jinja2 keeps internal caches for environments and lexers. These are
+ used so that Jinja2 doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
- from .environment import _spontaneous_environments
- from .lexer import _lexer_cache
-
+ from jinja2.environment import _spontaneous_environments
+ from jinja2.lexer import _lexer_cache
_spontaneous_environments.clear()
_lexer_cache.clear()
@@ -122,10 +132,12 @@ def import_string(import_name, silent=False):
:return: imported object
"""
try:
- if ":" in import_name:
- module, obj = import_name.split(":", 1)
- elif "." in import_name:
- module, _, obj = import_name.rpartition(".")
+ if ':' in import_name:
+ module, obj = import_name.split(':', 1)
+ elif '.' in import_name:
+ items = import_name.split('.')
+ module = '.'.join(items[:-1])
+ obj = items[-1]
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
@@ -134,14 +146,15 @@ def import_string(import_name, silent=False):
raise
-def open_if_exists(filename, mode="rb"):
+def open_if_exists(filename, mode='rb'):
"""Returns a file descriptor for the filename if that file exists,
- otherwise ``None``.
+ otherwise `None`.
"""
- if not os.path.isfile(filename):
- return None
-
- return open(filename, mode)
+ try:
+ return open(filename, mode)
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
+ raise
def object_type_repr(obj):
@@ -150,19 +163,15 @@ def object_type_repr(obj):
example for `None` and `Ellipsis`).
"""
if obj is None:
- return "None"
+ return 'None'
elif obj is Ellipsis:
- return "Ellipsis"
-
- cls = type(obj)
-
+ return 'Ellipsis'
# __builtin__ in 2.x, builtins in 3.x
- if cls.__module__ in ("__builtin__", "builtins"):
- name = cls.__name__
+ if obj.__class__.__module__ in ('__builtin__', 'builtins'):
+ name = obj.__class__.__name__
else:
- name = cls.__module__ + "." + cls.__name__
-
- return "%s object" % name
+ name = obj.__class__.__module__ + '.' + obj.__class__.__name__
+ return '%s object' % name
def pformat(obj, verbose=False):
@@ -171,11 +180,9 @@ def pformat(obj, verbose=False):
"""
try:
from pretty import pretty
-
return pretty(obj, verbose=verbose)
except ImportError:
from pprint import pformat
-
return pformat(obj)
@@ -193,77 +200,45 @@ def urlize(text, trim_url_limit=None, rel=None, target=None):
If target is not None, a target attribute will be added to the link.
"""
- trim_url = (
- lambda x, limit=trim_url_limit: limit is not None
- and (x[:limit] + (len(x) >= limit and "..." or ""))
- or x
- )
- words = re.split(r"(\s+)", text_type(escape(text)))
- rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ""
- target_attr = target and ' target="%s"' % escape(target) or ""
+ trim_url = lambda x, limit=trim_url_limit: limit is not None \
+ and (x[:limit] + (len(x) >= limit and '...'
+ or '')) or x
+ words = _word_split_re.split(text_type(escape(text)))
+ rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ''
+ target_attr = target and ' target="%s"' % escape(target) or ''
for i, word in enumerate(words):
- head, middle, tail = "", word, ""
- match = re.match(r"^([(<]|&lt;)+", middle)
-
+ match = _punctuation_re.match(word)
if match:
- head = match.group()
- middle = middle[match.end() :]
-
- # Unlike lead, which is anchored to the start of the string,
- # need to check that the string ends with any of the characters
- # before trying to match all of them, to avoid backtracking.
- if middle.endswith((")", ">", ".", ",", "\n", "&gt;")):
- match = re.search(r"([)>.,\n]|&gt;)+$", middle)
-
- if match:
- tail = match.group()
- middle = middle[: match.start()]
-
- if middle.startswith("www.") or (
- "@" not in middle
- and not middle.startswith("http://")
- and not middle.startswith("https://")
- and len(middle) > 0
- and middle[0] in _letters + _digits
- and (
- middle.endswith(".org")
- or middle.endswith(".net")
- or middle.endswith(".com")
- )
- ):
-            middle = '<a href="https://%s"%s%s>%s</a>' % (
- middle,
- rel_attr,
- target_attr,
- trim_url(middle),
- )
-
- if middle.startswith("http://") or middle.startswith("https://"):
-            middle = '<a href="%s"%s%s>%s</a>' % (
- middle,
- rel_attr,
- target_attr,
- trim_url(middle),
- )
-
- if (
- "@" in middle
- and not middle.startswith("www.")
- and ":" not in middle
- and re.match(r"^\S+@\w[\w.-]*\.\w+$", middle)
- ):
-            middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
-
- words[i] = head + middle + tail
-
- return u"".join(words)
+ lead, middle, trail = match.groups()
+ if middle.startswith('www.') or (
+ '@' not in middle and
+ not middle.startswith('http://') and
+ not middle.startswith('https://') and
+ len(middle) > 0 and
+ middle[0] in _letters + _digits and (
+ middle.endswith('.org') or
+ middle.endswith('.net') or
+ middle.endswith('.com')
+ )):
+                middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
+ rel_attr, target_attr, trim_url(middle))
+ if middle.startswith('http://') or \
+ middle.startswith('https://'):
+                middle = '<a href="%s"%s%s>%s</a>' % (middle,
+ rel_attr, target_attr, trim_url(middle))
+ if '@' in middle and not middle.startswith('www.') and \
+ not ':' in middle and _simple_email_re.match(middle):
+                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
+ if lead + middle + trail != word:
+ words[i] = lead + middle + trail
+ return u''.join(words)
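
Usage is identical on both sides; note that the 2.10-style code being restored links bare `www.` names with an `http://` scheme, while the removed lines used `https://`. Roughly (assuming a jinja2 2.10-era install):

    from jinja2.utils import urlize

    urlize('see www.example.com')
    # u'see <a href="http://www.example.com">www.example.com</a>'
    urlize('mail admin@example.com')
    # u'mail <a href="mailto:admin@example.com">admin@example.com</a>'
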
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
"""Generate some lorem ipsum for the template."""
- from .constants import LOREM_IPSUM_WORDS
-
+ from jinja2.constants import LOREM_IPSUM_WORDS
+ from random import choice, randrange
words = LOREM_IPSUM_WORDS.split()
result = []
@@ -288,53 +263,43 @@ def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
if idx - randrange(3, 8) > last_comma:
last_comma = idx
last_fullstop += 2
- word += ","
+ word += ','
# add end of sentences
if idx - randrange(10, 20) > last_fullstop:
last_comma = last_fullstop = idx
- word += "."
+ word += '.'
next_capitalized = True
p.append(word)
# ensure that the paragraph ends with a dot.
- p = u" ".join(p)
- if p.endswith(","):
- p = p[:-1] + "."
- elif not p.endswith("."):
- p += "."
+ p = u' '.join(p)
+ if p.endswith(','):
+ p = p[:-1] + '.'
+ elif not p.endswith('.'):
+ p += '.'
result.append(p)
if not html:
- return u"\n\n".join(result)
- return Markup(u"\n".join(u"%s
" % escape(x) for x in result))
-
+ return u'\n\n'.join(result)
+    return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
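
A quick check of both output modes (assuming a jinja2 2.x install):

    from jinja2.utils import generate_lorem_ipsum

    text = generate_lorem_ipsum(n=2, html=False)  # two plain-text paragraphs
    html = generate_lorem_ipsum(n=1)              # one <p>...</p> Markup block
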
-def unicode_urlencode(obj, charset="utf-8", for_qs=False):
- """Quote a string for use in a URL using the given charset.
- This function is misnamed, it is a wrapper around
- :func:`urllib.parse.quote`.
+def unicode_urlencode(obj, charset='utf-8', for_qs=False):
+ """URL escapes a single bytestring or unicode string with the
+ given charset if applicable to URL safe quoting under all rules
+ that need to be considered under all supported Python versions.
- :param obj: String or bytes to quote. Other types are converted to
- string then encoded to bytes using the given charset.
- :param charset: Encode text to bytes using this charset.
- :param for_qs: Quote "/" and use "+" for spaces.
+ If non strings are provided they are converted to their unicode
+ representation first.
"""
if not isinstance(obj, string_types):
obj = text_type(obj)
-
if isinstance(obj, text_type):
obj = obj.encode(charset)
-
- safe = b"" if for_qs else b"/"
- rv = url_quote(obj, safe)
-
- if not isinstance(rv, text_type):
- rv = rv.decode("utf-8")
-
+ safe = not for_qs and b'/' or b''
+ rv = text_type(url_quote(obj, safe))
if for_qs:
- rv = rv.replace("%20", "+")
-
+ rv = rv.replace('%20', '+')
return rv
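
The `for_qs` flag selects between path-style and query-string-style quoting: it controls whether `/` stays safe and whether spaces become `+`. For example (assuming a jinja2 2.x install):

    from jinja2.utils import unicode_urlencode

    unicode_urlencode(u'a b/c')               # u'a%20b/c'  (path rules, "/" kept)
    unicode_urlencode(u'a b/c', for_qs=True)  # u'a+b%2Fc'  (query-string rules)
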
@@ -361,9 +326,9 @@ def _postinit(self):
def __getstate__(self):
return {
- "capacity": self.capacity,
- "_mapping": self._mapping,
- "_queue": self._queue,
+ 'capacity': self.capacity,
+ '_mapping': self._mapping,
+ '_queue': self._queue
}
def __setstate__(self, d):
@@ -377,7 +342,7 @@ def copy(self):
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
- rv._queue.extend(self._queue)
+ rv._queue = deque(self._queue)
return rv
def get(self, key, default=None):
@@ -391,11 +356,15 @@ def setdefault(self, key, default=None):
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
+ self._wlock.acquire()
try:
- return self[key]
- except KeyError:
- self[key] = default
- return default
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+ finally:
+ self._wlock.release()
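
The added `_wlock` makes the test-and-set atomic, so two threads that both miss the key cannot both insert and then disagree about the stored value. The pattern in isolation (a sketch, not the vendored class):

    import threading

    _lock = threading.Lock()
    _cache = {}

    def setdefault(key, default=None):
        # the whole check-then-set is one critical section
        with _lock:
            try:
                return _cache[key]
            except KeyError:
                _cache[key] = default
                return default
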
def clear(self):
"""Clear the cache."""
@@ -415,7 +384,10 @@ def __len__(self):
return len(self._mapping)
def __repr__(self):
- return "<%s %r>" % (self.__class__.__name__, self._mapping)
+ return '<%s %r>' % (
+ self.__class__.__name__,
+ self._mapping
+ )
def __getitem__(self, key):
"""Get an item from the cache. Moves the item up so that it has the
@@ -464,6 +436,7 @@ def __delitem__(self, key):
try:
self._remove(key)
except ValueError:
+ # __getitem__ is not locked, it might happen
pass
finally:
self._wlock.release()
@@ -476,12 +449,6 @@ def items(self):
def iteritems(self):
"""Iterate over all items."""
- warnings.warn(
- "'iteritems()' will be removed in version 3.0. Use"
- " 'iter(cache.items())' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
return iter(self.items())
def values(self):
@@ -490,22 +457,6 @@ def values(self):
def itervalue(self):
"""Iterate over all values."""
- warnings.warn(
- "'itervalue()' will be removed in version 3.0. Use"
- " 'iter(cache.values())' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return iter(self.values())
-
- def itervalues(self):
- """Iterate over all values."""
- warnings.warn(
- "'itervalues()' will be removed in version 3.0. Use"
- " 'iter(cache.values())' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
return iter(self.values())
def keys(self):
@@ -516,19 +467,12 @@ def iterkeys(self):
"""Iterate over all keys in the cache dict, ordered by
the most recent usage.
"""
- warnings.warn(
- "'iterkeys()' will be removed in version 3.0. Use"
- " 'iter(cache.keys())' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return iter(self)
-
- def __iter__(self):
return reversed(tuple(self._queue))
+ __iter__ = iterkeys
+
def __reversed__(self):
- """Iterate over the keys in the cache dict, oldest items
+ """Iterate over the values in the cache dict, oldest items
coming first.
"""
return iter(tuple(self._queue))
@@ -536,15 +480,18 @@ def __reversed__(self):
__copy__ = copy
-abc.MutableMapping.register(LRUCache)
+# register the LRU cache as mutable mapping if possible
+try:
+ from collections import MutableMapping
+ MutableMapping.register(LRUCache)
+except ImportError:
+ pass
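
Registration only affects `isinstance`/`issubclass` checks; no methods are inherited. For example (assuming a jinja2 2.x install):

    try:
        from collections.abc import MutableMapping  # Python 3.3+
    except ImportError:
        from collections import MutableMapping
    from jinja2.utils import LRUCache

    isinstance(LRUCache(10), MutableMapping)  # True, via virtual subclassing
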
-def select_autoescape(
- enabled_extensions=("html", "htm", "xml"),
- disabled_extensions=(),
- default_for_string=True,
- default=False,
-):
+def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
+ disabled_extensions=(),
+ default_for_string=True,
+ default=False):
"""Intelligently sets the initial value of autoescaping based on the
filename of the template. This is the recommended way to configure
autoescaping if you do not want to write a custom function yourself.
@@ -579,9 +526,10 @@ def select_autoescape(
.. versionadded:: 2.9
"""
- enabled_patterns = tuple("." + x.lstrip(".").lower() for x in enabled_extensions)
- disabled_patterns = tuple("." + x.lstrip(".").lower() for x in disabled_extensions)
-
+ enabled_patterns = tuple('.' + x.lstrip('.').lower()
+ for x in enabled_extensions)
+ disabled_patterns = tuple('.' + x.lstrip('.').lower()
+ for x in disabled_extensions)
def autoescape(template_name):
if template_name is None:
return default_for_string
@@ -591,7 +539,6 @@ def autoescape(template_name):
if template_name.endswith(disabled_patterns):
return False
return default
-
return autoescape
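
Typical wiring, identical under either formatting (assuming a jinja2 2.9+ install):

    from jinja2 import Environment, select_autoescape

    env = Environment(autoescape=select_autoescape(
        enabled_extensions=('html', 'htm', 'xml'),
        default_for_string=True,   # templates built from plain strings are escaped
        default=False))            # templates with unknown extensions are not
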
@@ -615,63 +562,35 @@ def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
"""
if dumper is None:
dumper = json.dumps
- rv = (
- dumper(obj, **kwargs)
- .replace(u"<", u"\\u003c")
- .replace(u">", u"\\u003e")
- .replace(u"&", u"\\u0026")
- .replace(u"'", u"\\u0027")
- )
+ rv = dumper(obj, **kwargs) \
+ .replace(u'<', u'\\u003c') \
+ .replace(u'>', u'\\u003e') \
+ .replace(u'&', u'\\u0026') \
+ .replace(u"'", u'\\u0027')
return Markup(rv)
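
The four replacements make the dumped JSON safe to embed in `<script>` blocks and single-quoted HTML attributes. For example (assuming a jinja2 2.x install):

    from jinja2.utils import htmlsafe_json_dumps

    str(htmlsafe_json_dumps("</script>"))
    # '"\u003c/script\u003e"' -- no literal "<" or ">" survives
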
+@implements_iterator
class Cycler(object):
- """Cycle through values by yield them one at a time, then restarting
- once the end is reached. Available as ``cycler`` in templates.
-
- Similar to ``loop.cycle``, but can be used outside loops or across
- multiple loops. For example, render a list of folders and files in a
- list, alternating giving them "odd" and "even" classes.
-
- .. code-block:: html+jinja
-
-        {% set row_class = cycler("odd", "even") %}
-        <ul class="browser">
-        {% for folder in folders %}
-          <li class="folder {{ row_class.next() }}">{{ folder }}</li>
-        {% endfor %}
-        {% for file in files %}
-          <li class="file {{ row_class.next() }}">{{ file }}</li>
-        {% endfor %}
-        </ul>
-
- :param items: Each positional argument will be yielded in the order
- given for each cycle.
-
- .. versionadded:: 2.1
- """
+ """A cycle helper for templates."""
def __init__(self, *items):
if not items:
- raise RuntimeError("at least one item has to be provided")
+ raise RuntimeError('at least one item has to be provided')
self.items = items
- self.pos = 0
+ self.reset()
def reset(self):
- """Resets the current item to the first item."""
+ """Resets the cycle."""
self.pos = 0
@property
def current(self):
- """Return the current item. Equivalent to the item that will be
- returned next time :meth:`next` is called.
- """
+ """Returns the current item."""
return self.items[self.pos]
def next(self):
- """Return the current item, then advance :attr:`current` to the
- next item.
- """
+ """Goes one item ahead and returns it."""
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv
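
Outside of templates the class can be driven directly (assuming a jinja2 2.x install):

    from jinja2.utils import Cycler

    row = Cycler('odd', 'even')
    [row.next() for _ in range(3)]  # ['odd', 'even', 'odd']
    row.current                     # 'even' -- the value next() would return
    row.reset()                     # back to 'odd'
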
@@ -682,28 +601,27 @@ def next(self):
class Joiner(object):
"""A joining helper for templates."""
- def __init__(self, sep=u", "):
+ def __init__(self, sep=u', '):
self.sep = sep
self.used = False
def __call__(self):
if not self.used:
self.used = True
- return u""
+ return u''
return self.sep
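
The helper emits nothing on its first call and the separator on every later one, which is handy when joining while iterating:

    from jinja2.utils import Joiner  # assuming a jinja2 2.x install

    sep = Joiner(', ')
    ''.join(sep() + name for name in ('a', 'b', 'c'))  # 'a, b, c'
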
class Namespace(object):
"""A namespace object that can hold arbitrary attributes. It may be
- initialized from a dictionary or with keyword arguments."""
+ initialized from a dictionary or with keyword argments."""
- def __init__(*args, **kwargs): # noqa: B902
+ def __init__(*args, **kwargs):
self, args = args[0], args[1:]
self.__attrs = dict(*args, **kwargs)
def __getattribute__(self, name):
- # __class__ is needed for the awaitable check in async mode
- if name in {"_Namespace__attrs", "__class__"}:
+ if name == '_Namespace__attrs':
return object.__getattribute__(self, name)
try:
return self.__attrs[name]
@@ -714,24 +632,16 @@ def __setitem__(self, name, value):
self.__attrs[name] = value
def __repr__(self):
- return "" % self.__attrs
+ return '' % self.__attrs
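
Attribute reads go through `__getattribute__` into the private dict, while template-side writes (`{% set ns.x = ... %}`) compile to item assignment:

    from jinja2.utils import Namespace  # assuming a jinja2 2.10+ install

    ns = Namespace(count=0)
    ns['count'] = ns.count + 1  # how {% set ns.count = ns.count + 1 %} executes
    ns.count                    # 1
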
# does this python version support async for in and async generators?
try:
- exec("async def _():\n async for _ in ():\n yield _")
+ exec('async def _():\n async for _ in ():\n yield _')
have_async_gen = True
except SyntaxError:
have_async_gen = False
-def soft_unicode(s):
- from markupsafe import soft_unicode
-
- warnings.warn(
- "'jinja2.utils.soft_unicode' will be removed in version 3.0."
- " Use 'markupsafe.soft_unicode' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return soft_unicode(s)
+# Imported here because that's where it was in the past
+from markupsafe import Markup, escape, soft_unicode
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/visitor.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/visitor.py
old mode 100644
new mode 100755
index d1365bf1..ba526dfa
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/visitor.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jinja2/visitor.py
@@ -1,8 +1,14 @@
# -*- coding: utf-8 -*-
-"""API for traversing the AST nodes. Implemented by the compiler and
-meta introspection.
"""
-from .nodes import Node
+ jinja2.visitor
+ ~~~~~~~~~~~~~~
+
+ This module implements a visitor for the nodes.
+
+ :copyright: (c) 2017 by the Jinja Team.
+ :license: BSD.
+"""
+from jinja2.nodes import Node
class NodeVisitor(object):
@@ -22,7 +28,7 @@ def get_visitor(self, node):
exists for this node. In that case the generic visit function is
used instead.
"""
- method = "visit_" + node.__class__.__name__
+ method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/LICENSE b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/LICENSE
deleted file mode 100644
index 505b28f2..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/LICENSE
+++ /dev/null
@@ -1,31 +0,0 @@
-Copyright (c) 2016 by Anton Romanovich.
-
-Some rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
-
-* The names of the contributors may not be used to endorse or
- promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/METADATA
deleted file mode 100644
index 5661e45b..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/METADATA
+++ /dev/null
@@ -1,126 +0,0 @@
-Metadata-Version: 2.1
-Name: jsl
-Version: 0.2.4
-Summary: A Python DSL for defining JSON schemas
-Home-page: https://jsl.readthedocs.org
-Author: Anton Romanovich
-Author-email: anthony.romanovich@gmail.com
-License: BSD
-Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-License-File: LICENSE
-
-JSL
-===
-
-.. image:: https://travis-ci.org/aromanovich/jsl.svg?branch=master
- :target: https://travis-ci.org/aromanovich/jsl
- :alt: Build Status
-
-.. image:: https://coveralls.io/repos/aromanovich/jsl/badge.svg?branch=master
- :target: https://coveralls.io/r/aromanovich/jsl?branch=master
- :alt: Coverage
-
-.. image:: https://readthedocs.org/projects/jsl/badge/?version=latest
- :target: https://readthedocs.org/projects/jsl/
- :alt: Documentation
-
-.. image:: http://img.shields.io/pypi/v/jsl.svg
- :target: https://pypi.python.org/pypi/jsl
- :alt: PyPI Version
-
-.. image:: http://img.shields.io/pypi/dm/jsl.svg
- :target: https://pypi.python.org/pypi/jsl
- :alt: PyPI Downloads
-
-Documentation_ | GitHub_ | PyPI_
-
-JSL is a Python DSL for defining JSON Schemas.
-
-Example
--------
-
-::
-
- import jsl
-
- class Entry(jsl.Document):
- name = jsl.StringField(required=True)
-
- class File(Entry):
- content = jsl.StringField(required=True)
-
- class Directory(Entry):
- content = jsl.ArrayField(jsl.OneOfField([
- jsl.DocumentField(File, as_ref=True),
- jsl.DocumentField(jsl.RECURSIVE_REFERENCE_CONSTANT)
- ]), required=True)
-
-``Directory.get_schema(ordered=True)`` will return the following JSON schema:
-
-::
-
- {
- "$schema": "http://json-schema.org/draft-04/schema#",
- "definitions": {
- "directory": {
- "type": "object",
- "properties": {
- "name": {"type": "string"},
- "content": {
- "type": "array",
- "items": {
- "oneOf": [
- {"$ref": "#/definitions/file"},
- {"$ref": "#/definitions/directory"}
- ]
- }
- }
- },
- "required": ["name", "content"],
- "additionalProperties": false
- },
- "file": {
- "type": "object",
- "properties": {
- "name": {"type": "string"},
- "content": {"type": "string"}
- },
- "required": ["name", "content"],
- "additionalProperties": false
- }
- },
- "$ref": "#/definitions/directory"
- }
-
-Installing
-----------
-
-::
-
- pip install jsl
-
-License
--------
-
-`BSD license`_
-
-.. _Documentation: http://jsl.readthedocs.org/
-.. _GitHub: https://github.com/aromanovich/jsl
-.. _PyPI: https://pypi.python.org/pypi/jsl
-.. _BSD license: https://github.com/aromanovich/jsl/blob/master/LICENSE
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/RECORD
deleted file mode 100644
index a77293d6..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/RECORD
+++ /dev/null
@@ -1,21 +0,0 @@
-jsl-0.2.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-jsl-0.2.4.dist-info/LICENSE,sha256=SOcEeWBevO0ulaPJ1ZbwQHJrPKCqwjXQ6tUAo_LrYmE,1458
-jsl-0.2.4.dist-info/METADATA,sha256=Hoxdaj4BiIHCDAJfABf-ZKi-mpICjqGFzCXuG6Ve-ag,3655
-jsl-0.2.4.dist-info/RECORD,,
-jsl-0.2.4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jsl-0.2.4.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-jsl-0.2.4.dist-info/top_level.txt,sha256=5pe24rVnpqKlN1x1SAB6vpgck0EQXzb1HD-STjLrCXE,4
-jsl/__init__.py,sha256=Hav-h60_x4Z_1-SPwtgXo6HFmv2TAay_3xWfootT8Hg,536
-jsl/_compat/__init__.py,sha256=Wfz6h02kZsiVvzNdZALq2RTa7XpEbeAjeiE8CFTzC8Y,2118
-jsl/_compat/ordereddict.py,sha256=4KsFuc6V8IgHROCHUu-4vCrr21ZPPea7Z0cvX9AjQ7w,4094
-jsl/_compat/prepareable.py,sha256=T_gBTXGiK_0zPzV0EqOWlVUUQwWx8qJ4of1wksTDwqA,1739
-jsl/document.py,sha256=1RbdbROms1oiexp3ztXXJNVnBQSlQscI67Z_KvuSQrY,16474
-jsl/exceptions.py,sha256=1_kvw5YDgCASuqD3Q94emy0FgTdhkC_YuBKheGp51Y0,4016
-jsl/fields/__init__.py,sha256=aVP5_lviBvZEYWmD3I4TXmLaBypdeeWZiDbymr4oySM,86
-jsl/fields/base.py,sha256=4kk8AvsqAdRmegVQ14PrWcGfNwRUkchLfwd0otqlzaM,10335
-jsl/fields/compound.py,sha256=2X45LE8hnwySsMU07pEK0JJNNEnlomZ8QDL0ZFOtV7c,27232
-jsl/fields/primitive.py,sha256=XSZtwTIto4shCVACFPMXKbnKLd3d3422UZ6bsvLrz98,5906
-jsl/fields/util.py,sha256=bozkhxNnHkWKW42bbjdzAxlOxcjnCxXheXgtdpAKICI,560
-jsl/registry.py,sha256=4EQ2A7A9c5NDJ8X9TV0SIuQc3TexlyPVGi9kpfd3dPo,621
-jsl/resolutionscope.py,sha256=xy15FynJ6jxvCpPbkQwihqL_ApN-9-SKYT4y2ZhBYm8,2684
-jsl/roles.py,sha256=N8GTZiePmITVHVqkwZ9lT2flri8yeuGEc_Pp8tvszmA,6846
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/REQUESTED b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/REQUESTED
deleted file mode 100644
index e69de29b..00000000
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/WHEEL
deleted file mode 100644
index becc9a66..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.37.1)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/top_level.txt
deleted file mode 100644
index 1527f31b..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl-0.2.4.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-jsl
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/_compat/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/_compat/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/_compat/ordereddict.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/_compat/ordereddict.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/_compat/prepareable.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/_compat/prepareable.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/document.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/document.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/exceptions.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/exceptions.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/fields/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/fields/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/fields/base.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/fields/base.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/fields/compound.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/fields/compound.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/fields/primitive.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/fields/primitive.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/fields/util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/fields/util.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/registry.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/registry.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/resolutionscope.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/resolutionscope.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/roles.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsl/roles.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/DESCRIPTION.rst b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/DESCRIPTION.rst
deleted file mode 100644
index e1187231..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-UNKNOWN
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/METADATA
deleted file mode 100644
index 6afe4752..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/METADATA
+++ /dev/null
@@ -1,39 +0,0 @@
-Metadata-Version: 2.0
-Name: json-spec
-Version: 0.10.1
-Summary: Implements JSON Schema, JSON Pointer and JSON Reference.
-Home-page: http://py.errorist.io/json-spec
-Author: Xavier Barbosa
-Author-email: clint.northwood@gmail.com
-License: BSD
-Keywords: json,utilitaries,validation,json-pointer,json-reference,json-schema
-Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
-Classifier: Environment :: Console
-Classifier: Environment :: OpenStack
-Classifier: Intended Audience :: Developers
-Classifier: Intended Audience :: Information Technology
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Requires-Dist: six
-Requires-Dist: pathlib; python_version=="2.7"
-Requires-Dist: pathlib; python_version=="3.2"
-Requires-Dist: pathlib; python_version=="3.3"
-Provides-Extra: cli
-Requires-Dist: termcolor; extra == 'cli'
-Provides-Extra: ip
-Provides-Extra: ip
-Requires-Dist: ipaddress; python_version=="2.7" and extra == 'ip'
-Provides-Extra: ip
-Requires-Dist: ipaddress; python_version=="3.2" and extra == 'ip'
-
-UNKNOWN
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/RECORD
deleted file mode 100644
index 061cb92c..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/RECORD
+++ /dev/null
@@ -1,43 +0,0 @@
-../../bin/json,sha256=O9A-Jqy5-4FBFzn3lZGFxwGfzL0XqHgQurtr_s5yg1Y,211
-json_spec-0.10.1.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10
-json_spec-0.10.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-json_spec-0.10.1.dist-info/METADATA,sha256=agdgsO71HPjhkIyYtSm8auscc89zQx7WVskL4q6avoE,1417
-json_spec-0.10.1.dist-info/RECORD,,
-json_spec-0.10.1.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110
-json_spec-0.10.1.dist-info/entry_points.txt,sha256=U81yBW69TFZHs1rbzcvzc-1dDc52xB4Zlc5LEOKNuI8,1119
-json_spec-0.10.1.dist-info/metadata.json,sha256=ep3XB5ohP1IjiOCTbEq5dC19BqmnJjRbbK2eWqTLr0Q,2892
-json_spec-0.10.1.dist-info/top_level.txt,sha256=nuXD1z9nhYpDj3M1cb8CjTyofpM5ft1n4_SUFUdxvJk,9
-jsonspec/__init__.py,sha256=A77M3hKOR0uVTwrwLR2ah5u4YUqBM3zB_ZGrmP-HKAk,128
-jsonspec/__main__.py,sha256=84_S8skgwpyChkXoftZHQ9Vnqq73-6ehXSx9f4CU-R8,61
-jsonspec/_version.py,sha256=dT4VQLW1_nqi0rGQXJPMl5aQSwvRGyDp7ae23HSaIAU,472
-jsonspec/cli.py,sha256=lBLJtPbrTR_qXZHQwsc_ZWKnzuuKqtQf1-d9NFEZQzI,16440
-jsonspec/driver.py,sha256=o8rxbpgkRDLuUnjUiIl6DcQdq5O2kYL53mvJlJm4Zf8,465
-jsonspec/misc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jsonspec/misc/schemas/draft-03/card.json,sha256=yFAfxR_-RrQFGL0rvWXZJtg3FnIKFQs6MjBtojOIwoA,1791
-jsonspec/misc/schemas/draft-03/hyper-schema.json,sha256=w89zVOSUuBzcIwQ2H-BNn1yl441wYv1lrQ_UwWBZnoc,897
-jsonspec/misc/schemas/draft-03/json-ref.json,sha256=RmWvtjS1xGluN42j54KazsZhraeI4n6f6LSPpn3zTFI,394
-jsonspec/misc/schemas/draft-03/links.json,sha256=RmWvtjS1xGluN42j54KazsZhraeI4n6f6LSPpn3zTFI,394
-jsonspec/misc/schemas/draft-03/schema.json,sha256=UWn9BJeN29Qx1m4uoIoEJAh4wrrYCNhmlPA6pth6uZ8,2476
-jsonspec/misc/schemas/draft-04/hyper-schema.json,sha256=3XVDAXrtFIa4eg1s0BmGIOpTjURJ8onkZ66KGttdtbU,4816
-jsonspec/misc/schemas/draft-04/schema.json,sha256=c1P_E_qpeQJ4E7lcizX5kgV4ImlqcMQ8ebO_tySedrQ,4375
-jsonspec/operations/__init__.py,sha256=WjvUJ7KU_6YagrTLTeiviDR1y-_MA0AzfURFICKGLEk,2424
-jsonspec/operations/bases.py,sha256=y1C2PtN2Z1rf-C1j6F-83cJfZDcPrCIZrxKFj38oI2g,6534
-jsonspec/operations/exceptions.py,sha256=b5Xw-N6e76XqKJK6rv-JMhsUJ9IjiesceYcp72qJ0vw,298
-jsonspec/pointer/__init__.py,sha256=hPa4LsK7nAAA9wqcKm7hPIskTe9tdCDM6D1pF-Aa6u0,1042
-jsonspec/pointer/bases.py,sha256=6LOqcwBxlzGb3Rf5azABpWKpIC1tFdyORzAyxVRl-Hw,7669
-jsonspec/pointer/exceptions.py,sha256=YVkGu1I0tHRKG-130WuzLIWetsNNmQcbAjWPs38nQIg,1591
-jsonspec/pointer/stages.py,sha256=MJ4HLy8CwwEEJmXBy_UGjf_QZUeRgSM-a_kVnfiaXKU,1970
-jsonspec/reference/__init__.py,sha256=W4tiyAwJUcxYax-zsr6F5HEua1eG4-MEbPIZVb0tB8A,1660
-jsonspec/reference/bases.py,sha256=7PGFEuXRR3l9Hd8RNmxAzRATn3jrCSm0jmyV8YjdhtA,3015
-jsonspec/reference/exceptions.py,sha256=pcHyar7K64H3ZR3UT5TejlzCLSVZJYgnNFwzBzrUepo,256
-jsonspec/reference/providers.py,sha256=DkDS3s_V9ooInBgQbbwYrVVcaUaLkRx4bm28Zqtx9wo,5449
-jsonspec/reference/util.py,sha256=LyIx-BNMDcmXOdwXxjmuux0cQwRCbQ1Nee2WxY80LYg,409
-jsonspec/validators/__init__.py,sha256=FwljvjOKzA7PDzj-fpGuqCjWBuf4UeUVBhVF93sBPEo,1254
-jsonspec/validators/bases.py,sha256=t4LPAp6A_qGxSpb2V2yVoWlf0VgFFmaqHdzPm8CtaMg,2484
-jsonspec/validators/draft03.py,sha256=lN2c1jktN12A0srtuutdB3e0D0Qb9BpAl9dv7BINuPU,26448
-jsonspec/validators/draft04.py,sha256=OrjX_Jat6KUlbSBljFjidSnC6I-a_idVGYkptlUyfsk,26115
-jsonspec/validators/exceptions.py,sha256=_B-lwKghJddC4LqDCXXAzrAK9i4RFJlWe5HKdYTiakg,1935
-jsonspec/validators/factorize.py,sha256=sItg4V_6ArBONK_25Eneoskgbr-OAdRdtKy9pzKTS4A,3884
-jsonspec/validators/formats.py,sha256=6DKFzHa9U7XzRiLDXN4RL3zh6NhKlermvMa0chqJqNI,3889
-jsonspec/validators/pointer_util.py,sha256=zS3Zier_TGhx-VV013oya9EV4Mfq8vh1sDJ3cxHf4e0,388
-jsonspec/validators/util.py,sha256=wzb8f6ZL9WDtZLVqav2jUaqGbQxDTS7VC_o4QPwV0KY,7655
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/WHEEL
deleted file mode 100644
index 8b6dd1b5..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.29.0)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/entry_points.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/entry_points.txt
deleted file mode 100644
index d11508ef..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/entry_points.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-[console_scripts]
-json = jsonspec.cli:main
-
-[jsonspec.cli.commands]
-add = jsonspec.cli:AddCommand
-check = jsonspec.cli:CheckCommand
-copy = jsonspec.cli:CopyCommand
-extract = jsonspec.cli:ExtractCommand
-move = jsonspec.cli:MoveCommand
-remove = jsonspec.cli:RemoveCommand
-replace = jsonspec.cli:ReplaceCommand
-validate = jsonspec.cli:ValidateCommand
-
-[jsonspec.reference.contributions]
-spec = jsonspec.reference.providers:SpecProvider
-
-[jsonspec.validators.formats]
-css.color = jsonspec.validators.util:validate_css_color
-email = jsonspec.validators.util:validate_email
-hostname = jsonspec.validators.util:validate_hostname
-ipv4 = jsonspec.validators.util:validate_ipv4 [ip]
-ipv6 = jsonspec.validators.util:validate_ipv6 [ip]
-regex = jsonspec.validators.util:validate_regex
-rfc3339.datetime = jsonspec.validators.util:validate_rfc3339_datetime
-uri = jsonspec.validators.util:validate_uri
-utc.date = jsonspec.validators.util:validate_utc_date
-utc.datetime = jsonspec.validators.util:validate_utc_datetime
-utc.millisec = jsonspec.validators.util:validate_utc_millisec
-utc.time = jsonspec.validators.util:validate_utc_time
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/metadata.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/metadata.json
deleted file mode 100644
index 50ffb0ad..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"classifiers": ["Development Status :: 4 - Beta", "Environment :: Console", "Environment :: OpenStack", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"], "extensions": {"python.commands": {"wrap_console": {"json": "jsonspec.cli:main"}}, "python.details": {"contacts": [{"email": "clint.northwood@gmail.com", "name": "Xavier Barbosa", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://py.errorist.io/json-spec"}}, "python.exports": {"console_scripts": {"json": "jsonspec.cli:main"}, "jsonspec.cli.commands": {"add": "jsonspec.cli:AddCommand", "check": "jsonspec.cli:CheckCommand", "copy": "jsonspec.cli:CopyCommand", "extract": "jsonspec.cli:ExtractCommand", "move": "jsonspec.cli:MoveCommand", "remove": "jsonspec.cli:RemoveCommand", "replace": "jsonspec.cli:ReplaceCommand", "validate": "jsonspec.cli:ValidateCommand"}, "jsonspec.reference.contributions": {"spec": "jsonspec.reference.providers:SpecProvider"}, "jsonspec.validators.formats": {"css.color": "jsonspec.validators.util:validate_css_color", "email": "jsonspec.validators.util:validate_email", "hostname": "jsonspec.validators.util:validate_hostname", "ipv4": "jsonspec.validators.util:validate_ipv4 [ip]", "ipv6": "jsonspec.validators.util:validate_ipv6 [ip]", "regex": "jsonspec.validators.util:validate_regex", "rfc3339.datetime": "jsonspec.validators.util:validate_rfc3339_datetime", "uri": "jsonspec.validators.util:validate_uri", "utc.date": "jsonspec.validators.util:validate_utc_date", "utc.datetime": "jsonspec.validators.util:validate_utc_datetime", "utc.millisec": "jsonspec.validators.util:validate_utc_millisec", "utc.time": "jsonspec.validators.util:validate_utc_time"}}}, "extras": ["cli", "ip"], "generator": "bdist_wheel (0.29.0)", "keywords": ["json", "utilitaries", "validation", "json-pointer", "json-reference", "json-schema"], "license": "BSD", "metadata_version": "2.0", "name": "json-spec", "run_requires": [{"requires": ["six"]}, {"extra": "cli", "requires": ["termcolor"]}, {"environment": "python_version==\"2.7\"", "extra": "ip", "requires": ["ipaddress"]}, {"environment": "python_version==\"2.7\"", "requires": ["pathlib"]}, {"environment": "python_version==\"3.2\"", "extra": "ip", "requires": ["ipaddress"]}, {"environment": "python_version==\"3.2\"", "requires": ["pathlib"]}, {"environment": "python_version==\"3.3\"", "requires": ["pathlib"]}], "summary": "Implements JSON Schema, JSON Pointer and JSON Reference.", "version": "0.10.1"}
\ No newline at end of file
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/top_level.txt
deleted file mode 100644
index 47f227fb..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/json_spec-0.10.1.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-jsonspec
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/COPYING b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/COPYING
deleted file mode 100644
index 02360d75..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/COPYING
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2013-2019 Gaspare Iengo ( Dando Real ITA )
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
\ No newline at end of file
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/METADATA
deleted file mode 100644
index b4ec68e0..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/METADATA
+++ /dev/null
@@ -1,189 +0,0 @@
-Metadata-Version: 2.1
-Name: jsoncomment
-Version: 0.4.2
-Summary: A wrapper to JSON parsers allowing comments, multiline strings and trailing commas
-Home-page: https://bitbucket.org/Dando_Real_ITA/json-comment
-Author: Gaspare Iengo
-Author-email: gaspareiengo@gmail.com
-License: MIT
-Keywords: json comments multiline
-Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
-Classifier: Intended Audience :: End Users/Desktop
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Natural Language :: English
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Topic :: Software Development :: Pre-processors
-Classifier: Topic :: Text Editors :: Text Processing
-Requires-Python: >=3.3
-Description-Content-Type: text/markdown
-Requires-Dist: json-spec
-Provides-Extra: ujson
-Requires-Dist: ujson (>=1.30) ; extra == 'ujson'
-
-Json Comment
-============
-
-A wrapper to JSON parsers allowing comments, multiline strings and trailing commas
-
-- - -
-
-Dependencies
-------------
-
-* Python 3.3+
-* TODO NEW ADDITIONS
-
-### Optional
-
-* ujson 1.30+
-
-- - -
-
-Description
------------
-
-JSON Comment allows to parse JSON files or strings with:
-
-* Single and Multi line comments
-* Multi line data strings
-* Trailing commas in objects and arrays, after the last item
-
-This package works with any JSON parser which supports:
-
-* `load(fp, ...)` to parse files
-* `loads(s, ...)` to parse strings
-
-by adding a preprocessor to these calls.
-
-- - -
-
-### Comments
-
-* `#`, `;` and `//` are for single line comments
-* `/*` and `*/` enclose multiline comments
-
-Inline comments are **not** supported
-
-- - -
-
-### Multiline strings
-
-Any string can be multiline, even object keys.
-
-* Multiline strings start and end with `"""`, like in python
-* The preprocessor merges all lines to a single JSON standard string
-* A single trailing space per line is kept, if present
-* New line is not kept. To hard code new lines in the string, use `\\n`
-
-- - -
-
-Custom Methods
---------------
-
-### File Load
-
-`loadf(path, *args, default = None, **kwargs)`
-
-Opens a JSON file with comments. Allows a default value if loading or parsing fails
-
-### File Save
-
-`dumpf(json_obj, path, *args, indent=4, **kwargs)`
-
-Saves a JSON file with indentation
-
-- - -
-
-Install
--------
-
-`pip install jsoncomment`
-
-OR
-
-* Download source
-* `python setup.py install`
-
-- - -
-
-Usage
------
-
- from jsoncomment import JsonComment
-
- string = "[]"
- json = JsonComment()
- json_obj = json.loads(string)
-
-### Examples
-
-Added in the /examples directory
-
-- - -
-
-### Limitations
-
-* `#`, `;`, `//` and `/*` may be preceded only by whitespaces or tabs on the same line
-* `*/` may be followed only by whitespaces or tabs on the same line
-* The trailing comma must be the last character on the same line
-
-- - -
-
-Source
-------
-
-[Source](https://bitbucket.org/Dando_Real_ITA/json-comment/src/default)
-code available with MIT license on Bitbucket.
-
-- - -
-
-API
----
-
-Added in top level `__init__.py`
-
-### How to read the API
-
-API is split in:
-
- * `User Interface` for common use
- * `Developer Interface` exposing some internals that could be useful
-
-For each item ( function or class ), there are 2 blocks of comments, above and below item definition:
-
- * The top describes the return values
- * The bottom describes the item and call variables
-
-If call variables have defaults or use duck typing, every allowed value is described
-
-Example:
-
- # return_value
- # description
- from .some_module import SomeClass
- # SomeClass description
- # (
- # variable_1,
- # description
- # variable_2 = something,
- # description
-
- # = Default
- # description of default value ( something )
- # = something_2
- # description of alternate form ( duck typing )
- # )
-
-describes `return_value = SomeClass(variable_1, variable_2 = current_value)`
-
-- - -
-
-Contact
--------
-
-Gaspare Iengo @ [email](mailto:gaspareiengo@gmail.com)
-
-
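
For reference, a minimal sketch of the usage the removed README documents, honoring its stated limitations (comments and trailing commas must sit at line boundaries; assumes the jsoncomment 0.4.x package is installed):

    from jsoncomment import JsonComment

    parser = JsonComment()
    obj = parser.loads("""
    {
        // comments must occupy their own line
        "items": [
            1,
            2,
        ],
    }
    """)
    assert obj == {"items": [1, 2]}
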
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/RECORD
deleted file mode 100644
index d3a6df94..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/RECORD
+++ /dev/null
@@ -1,10 +0,0 @@
-jsoncomment-0.4.2.dist-info/COPYING,sha256=gu53buiij96a9W1g0xORV4R6GmzPtu-xmZ4mfHOovYQ,1098
-jsoncomment-0.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-jsoncomment-0.4.2.dist-info/METADATA,sha256=PnNE_qsbM68g7oKDmvIXJA_Ll8ThUueWXI4zr64o_l0,3928
-jsoncomment-0.4.2.dist-info/RECORD,,
-jsoncomment-0.4.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jsoncomment-0.4.2.dist-info/WHEEL,sha256=LRS-GSFNyYPLBJaJJ6kxdk4Urtb4kEVXWc5DpA8iv6o,97
-jsoncomment-0.4.2.dist-info/top_level.txt,sha256=WEsvmxL9rjjlfFf2HvGixakFO6QCKnDuIbeSPWDZU30,12
-jsoncomment/__init__.py,sha256=jd_wDTTiV7D6mGKxUEG7f2Ens0jRapHZpoEScFaT0mg,2102
-jsoncomment/comments.py,sha256=dvz1Zt5ObrwzIKeAL6GjYvPpnGwzQ7V-Nv9nUp4TiHw,5613
-jsoncomment/wrapper.py,sha256=_rWGLt8QRn19oy3WxqKpgOuEiMSZZnFPwtDhdv0TyfI,577
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/REQUESTED b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/REQUESTED
deleted file mode 100644
index e69de29b..00000000
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/WHEEL
deleted file mode 100644
index 5be2025d..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.32.3)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/top_level.txt
deleted file mode 100644
index 3b4bc6b6..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment-0.4.2.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-jsoncomment
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment/__init__.py
deleted file mode 100644
index 75be9a80..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment/__init__.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/python
-# coding: utf-8
-
-##########################################################################################################################################
-
-# User Interface
-
-# wrapped_parser
- # The new parser can be used as a drop in replacement of the old parser, supporting both wrapped ( i.e. loads ) and unchanged ( i.e. dumps ) methods
-from .comments import JsonComment
- # Allows any JSON parser to ignore comments, accept multiline strings and a trailing comma in objects/arrays
-# (json_parser_module)
- # Module or Class Instance, which supports JSON load/loads interface
-
-##########################################################################################################################################
-
-# Developer Interface
-
-# wrapped_instance
- # An instance mimicking the wrapped object
-from .wrapper import GenericWrapper
- # A Class to simulate dynamic inheritance of Modules and Classes
- # This class should only be inherited from.
- # Inheriting classes can access their wrapped object via self.wrapped
-# (wrapped)
- # Module or Class Instance to be wrapped
-
- # Use example:
- # class WrapMyClass(GenericWrapper):
- # # Preprocess part of the call to method1, then call method1 of the wrapped object
- # def method1 (self, some_variable, *args, **kwargs):
- # new_variable = do something with some_variable
- # return self.wrapped.method1( new_variable, *args, **kwargs )
- # # Substitute method2 of the wrapped object
- # def method2 (self, some_variable1, some_variable2):
- # result = do something
- # return result
-
- # # Wraps a class instance
- # wrapped_class = WrapMyClass(MyClass())
- # # The wrapped method
- # print(wrapped_class.method1(some_variable, something_else))
- # # The changed method
- # print(wrapped_class.method2(some_variable1, some_variable2))
- # # The original method, untouched by the wrapper
- # print(wrapped_class.method3(anything))
-
-##########################################################################################################################################
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment/comments.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment/comments.py
deleted file mode 100644
index 36a27ca7..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment/comments.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/bin/python
-# coding: utf-8
-
-##########################################################################################################################################
-
-# For templating
-import re
-
-# The parser
-try:
- import ujson as json
-except ImportError:
- import json
-
-# For templating
-from jsonspec.pointer import extract, ExtractError
-
-# The wrapper base class
-from .wrapper import GenericWrapper
-
-##########################################################################################################################################
-
-# Comments
-COMMENT_PREFIX = ("#",";","//")
-MULTILINE_START = "/*"
-MULTILINE_END = "*/"
-
-# Data strings
-LONG_STRING = '"""'
-
-# JSON Pointer template
-TEMPLATE_RE = re.compile(r"\{\{(.*?)\}\}")
-
-##########################################################################################################################################
-
-class JsonComment(GenericWrapper):
- def __init__(self, wrapped=json):
- super().__init__(wrapped)
-
- # Loads a JSON string with comments
- # Allows to expand the JSON Pointer templates
- def loads(self, jsonsc, *args, template=True, **kwargs):
- # Splits the string in lines
- lines = jsonsc.splitlines()
- # Process the lines to remove commented ones
- jsons = self._preprocess(lines)
- # Calls the wrapped to parse JSON
- self.obj = self.wrapped.loads(jsons, *args, **kwargs)
- # If there are templates, subs them
- if template:
- self._templatesub(self.obj)
- return self.obj
-
- # Loads a JSON opened file with comments
- def load(self, jsonf, *args, **kwargs):
- # Reads a text file as a string
- # Process the readed JSON string
- return self.loads(jsonf.read(), *args, **kwargs)
-
- # Opens a JSON file with comments
- # Allows a default value if loading or parsing fails
- def loadf(self, path, *args, default = None, **kwargs):
- # Preparing the default
- json_obj = default
-
- # Opening file in append+read mode
- # Allows creation of empty file if non-existent
- with open( path, mode="a+", encoding="UTF-8" ) as jsonf:
- try:
- # Back to file start
- jsonf.seek(0)
- # Parse and load the JSON
- json_obj = self.load(jsonf, *args, **kwargs)
- # If fails, default value is kept
- except ValueError:
- pass
-
- return json_obj
-
- # Saves a JSON file with indentation
- def dumpf(self, json_obj, path, *args, indent=4, escape_forward_slashes=False, **kwargs):
- # Opening file in write mode
- with open( path, mode="w", encoding="UTF-8" ) as jsonf:
- # Dumping the object
- # Keyword escape_forward_slashes is only for ujson, standard json raises an exception for unknown keyword
- # In that case, the method is called again without it
- try:
- json.dump(json_obj, jsonf, *args, indent=indent, escape_forward_slashes=escape_forward_slashes, **kwargs)
- except TypeError:
- json.dump(json_obj, jsonf, *args, indent=indent, **kwargs)
-
- # Reads lines and skips comments
- def _preprocess(self, lines):
- standard_json = ""
- is_multiline = False
- keep_trail_space = 0
-
- for line in lines:
- # 0 if there is no trailing space
- # 1 otherwise
- keep_trail_space = int(line.endswith(" "))
-
- # Remove all whitespace on both sides
- line = line.strip()
-
- # Skip blank lines
- if len(line) == 0:
- continue
-
- # Skip single line comments
- if line.startswith(COMMENT_PREFIX):
- continue
-
- # Mark the start of a multiline comment
- # Not skipping, to identify single line comments using multiline comment tokens, like
- # /***** Comment *****/
- if line.startswith(MULTILINE_START):
- is_multiline = True
-
- # Skip a line of multiline comments
- if is_multiline:
- # Mark the end of a multiline comment
- if line.endswith(MULTILINE_END):
- is_multiline = False
- continue
-
- # Replace the multi line data token to the JSON valid one
- if LONG_STRING in line:
- line = line.replace(LONG_STRING, '"')
-
- standard_json += line + " " * keep_trail_space
-
- # Removing non-standard trailing commas
- standard_json = standard_json.replace(",]", "]")
- standard_json = standard_json.replace(",}", "}")
-
- return standard_json
-
- # Walks the json object and subs template strings with pointed value
- def _templatesub(self, obj):
- # Gets items for iterables
- if isinstance(obj, dict):
- items = obj.items()
- elif isinstance(obj, list):
- items = enumerate(obj)
- else:
- items = None
-
- # Walks the iterable
- for key, subobj in items:
- # If subobj is another iterable, call this method again
- if isinstance(subobj, (dict, list)):
- self._templatesub(subobj)
- # If is a string:
- # - Find all matches to the template
- # - For each match, get through JSON Pointer the value, which must be a string
- # - Substitute each match to the pointed value, or ""
- # - The string with all templates substitued is written back to the parent obj
- elif isinstance(subobj, str):
- obj[key] = TEMPLATE_RE.sub(self._repl_getvalue, subobj)
-
- # Replacement function
- # The match has the JSON Pointer
- def _repl_getvalue(self, match):
- try:
- # Extracts the pointed value from the root object
- value = extract(self.obj, match[1])
- # If it's not a string, it's not valid
- if not isinstance(value, str):
- raise ValueError("Not a string: {}".format(value))
- except (ExtractError, ValueError) as e:
- # Sets value to empty string
- value = ""
- print(e)
- return value
-
-##########################################################################################################################################
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment/wrapper.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment/wrapper.py
deleted file mode 100644
index 3bbf078b..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsoncomment/wrapper.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/python
-# coding: utf-8
-
-##########################################################################################################################################
-
-# A Class to simulate dynamic inheritance of Modules and Classes
-class GenericWrapper:
- def __init__(self, wrapped):
- self.wrapped = wrapped
-
- # Fallback lookup for undefined methods
- def __getattr__(self, name):
- return getattr(self.wrapped, name)
-
-##########################################################################################################################################
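
The removed wrapper is small enough to restate: subclasses override the few methods they care about, and every other attribute falls through to the wrapped module or instance. A self-contained sketch with a hypothetical subclass (`UpperKeys` is illustrative, not part of the package):

    import json

    class GenericWrapper:
        def __init__(self, wrapped):
            self.wrapped = wrapped

        def __getattr__(self, name):  # fallback lookup for undefined methods
            return getattr(self.wrapped, name)

    class UpperKeys(GenericWrapper):  # hypothetical: preprocess loads() only
        def loads(self, s, *args, **kwargs):
            obj = self.wrapped.loads(s, *args, **kwargs)
            return {k.upper(): v for k, v in obj.items()}

    parser = UpperKeys(json)
    assert parser.loads('{"a": 1}') == {'A': 1}   # overridden method
    assert parser.dumps({'a': 1}) == '{"a": 1}'   # falls through to json.dumps
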
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/METADATA
deleted file mode 100644
index 7c5174de..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/METADATA
+++ /dev/null
@@ -1,354 +0,0 @@
-Metadata-Version: 2.1
-Name: jsonpath-ng
-Version: 1.5.3
-Summary: A final implementation of JSONPath for Python that aims to be standard compliant, including arithmetic and binary comparison operators and providing clear AST for metaprogramming.
-Home-page: https://github.com/h2non/jsonpath-ng
-Author: Tomas Aparicio
-Author-email: tomas@aparicio.me
-License: Apache 2.0
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Requires-Dist: ply
-Requires-Dist: decorator
-Requires-Dist: six
-
-Python JSONPath Next-Generation |Build Status| |PyPI|
-=====================================================
-
-A final implementation of JSONPath for Python that aims to be standard compliant, including arithmetic
-and binary comparison operators, as defined in the original `JSONPath proposal`_.
-
-This packages merges both `jsonpath-rw`_ and `jsonpath-rw-ext`_ and
-provides several AST API enhancements, such as the ability to update or removes nodes in the tree.
-
-About
------
-
-This library provides a robust and significantly extended implementation
-of JSONPath for Python. It is tested with CPython 2.6, 2.7 & 3.x.
-
-This library differs from other JSONPath implementations in that it is a
-full *language* implementation, meaning the JSONPath expressions are
-first class objects, easy to analyze, transform, parse, print, and
-extend.
-
-Quick Start
------------
-
-To install, use pip:
-
-.. code:: bash
-
- $ pip install --upgrade jsonpath-ng
-
-
-Usage
------
-
-Basic examples:
-
-.. code:: python
-
- $ python
-
- >>> from jsonpath_ng import jsonpath, parse
-
- # A robust parser, not just a regex. (Makes powerful extensions possible; see below)
- >>> jsonpath_expr = parse('foo[*].baz')
-
- # Extracting values is easy
- >>> [match.value for match in jsonpath_expr.find({'foo': [{'baz': 1}, {'baz': 2}]})]
- [1, 2]
-
- # Matches remember where they came from
- >>> [str(match.full_path) for match in jsonpath_expr.find({'foo': [{'baz': 1}, {'baz': 2}]})]
- ['foo.[0].baz', 'foo.[1].baz']
-
- # And this can be useful for automatically providing ids for bits of data that do not have them (currently a global switch)
- >>> jsonpath.auto_id_field = 'id'
- >>> [match.value for match in parse('foo[*].id').find({'foo': [{'id': 'bizzle'}, {'baz': 3}]})]
- ['foo.bizzle', 'foo.[1]']
-
- # A handy extension: named operators like `parent`
- >>> [match.value for match in parse('a.*.b.`parent`.c').find({'a': {'x': {'b': 1, 'c': 'number one'}, 'y': {'b': 2, 'c': 'number two'}}})]
- ['number two', 'number one']
-
- # You can also build expressions directly quite easily
- >>> from jsonpath_ng.jsonpath import Fields
- >>> from jsonpath_ng.jsonpath import Slice
-
- >>> jsonpath_expr_direct = Fields('foo').child(Slice('*')).child(Fields('baz')) # This is equivalent
-
-
-Using the extended parser:
-
-.. code:: python
-
- $ python
-
- >>> from jsonpath_ng.ext import parse
-
- # A robust parser, not just a regex. (Makes powerful extensions possible; see below)
- >>> jsonpath_expr = parse('foo[*].baz')
-
-
-JSONPath Syntax
----------------
-
-The JSONPath syntax supported by this library includes some additional
-features and omits some problematic features (those that make it
-unportable). In particular, some new operators such as ``|`` and
-``where`` are available, and parentheses are used for grouping, not for
-callbacks into Python, since with these changes the language is not
-trivially associative. Also, fields may be quoted whether or not they
-are contained in brackets.
-
-Atomic expressions:
-
-+-----------------------+---------------------------------------------------------------------------------------------+
-| Syntax | Meaning |
-+=======================+=============================================================================================+
-| ``$`` | The root object |
-+-----------------------+---------------------------------------------------------------------------------------------+
-| ```this``` | The "current" object. |
-+-----------------------+---------------------------------------------------------------------------------------------+
-| ```foo``` | More generally, this syntax allows "named operators" to extend JSONPath in arbitrary ways |
-+-----------------------+---------------------------------------------------------------------------------------------+
-| *field* | Specified field(s), described below |
-+-----------------------+---------------------------------------------------------------------------------------------+
-| ``[`` *field* ``]`` | Same as *field* |
-+-----------------------+---------------------------------------------------------------------------------------------+
-| ``[`` *idx* ``]`` | Array access, described below (this is always unambiguous with field access) |
-+-----------------------+---------------------------------------------------------------------------------------------+
-
-Jsonpath operators:
-
-+-------------------------------------+------------------------------------------------------------------------------------+
-| Syntax | Meaning |
-+=====================================+====================================================================================+
-| *jsonpath1* ``.`` *jsonpath2* | All nodes matched by *jsonpath2* starting at any node matching *jsonpath1* |
-+-------------------------------------+------------------------------------------------------------------------------------+
-| *jsonpath* ``[`` *whatever* ``]`` | Same as *jsonpath*\ ``.``\ *whatever* |
-+-------------------------------------+------------------------------------------------------------------------------------+
-| *jsonpath1* ``..`` *jsonpath2* | All nodes matched by *jsonpath2* that descend from any node matching *jsonpath1* |
-+-------------------------------------+------------------------------------------------------------------------------------+
-| *jsonpath1* ``where`` *jsonpath2* | Any nodes matching *jsonpath1* with a child matching *jsonpath2* |
-+-------------------------------------+------------------------------------------------------------------------------------+
-| *jsonpath1* ``|`` *jsonpath2* | Any nodes matching the union of *jsonpath1* and *jsonpath2* |
-+-------------------------------------+------------------------------------------------------------------------------------+
-
-Field specifiers ( *field* ):
-
-+-------------------------+-------------------------------------------------------------------------------------+
-| Syntax | Meaning |
-+=========================+=====================================================================================+
-| ``fieldname`` | the field ``fieldname`` (from the "current" object) |
-+-------------------------+-------------------------------------------------------------------------------------+
-| ``"fieldname"`` | same as above, for allowing special characters in the fieldname |
-+-------------------------+-------------------------------------------------------------------------------------+
-| ``'fieldname'`` | ditto |
-+-------------------------+-------------------------------------------------------------------------------------+
-| ``*`` | any field |
-+-------------------------+-------------------------------------------------------------------------------------+
-| *field* ``,`` *field* | either of the named fields (you can always build equivalent jsonpath using ``|``) |
-+-------------------------+-------------------------------------------------------------------------------------+
-
-Array specifiers ( *idx* ):
-
-+-----------------------------------------+---------------------------------------------------------------------------------------+
-| Syntax | Meaning |
-+=========================================+=======================================================================================+
-| ``[``\ *n*\ ``]`` | array index (may be comma-separated list) |
-+-----------------------------------------+---------------------------------------------------------------------------------------+
-| ``[``\ *start*\ ``?:``\ *end*\ ``?]`` | array slicing (note that *step* is unimplemented only due to lack of need thus far) |
-+-----------------------------------------+---------------------------------------------------------------------------------------+
-| ``[*]`` | any array index |
-+-----------------------------------------+---------------------------------------------------------------------------------------+
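-
-For example, the ``..``, ``where`` and ``|`` operators above compose
-like any other jsonpath. A small illustrative sketch (the data is
-hypothetical):
-
-.. code:: python
-
-    >>> from jsonpath_ng import parse
-
-    >>> data = {'a': {'b': 1, 'c': {'b': 2}}, 'd': {'b': 3}}
-
-    # Descendants: every `b` at any depth under the root
-    >>> [m.value for m in parse('$..b').find(data)]
-    [1, 2, 3]
-
-    # Where: nodes matched by `$.*` that have a child matching `c`
-    >>> [m.value for m in parse('$.* where c').find(data)]
-    [{'b': 1, 'c': {'b': 2}}]
-
-    # Union: matches of either side, concatenated
-    >>> [m.value for m in parse('a.b | d.b').find(data)]
-    [1, 3]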
-
-Programmatic JSONPath
----------------------
-
-If you are programming in Python and would like a more robust way to
-create JSONPath expressions that does not depend on a parser, it is very
-easy to do so directly, and here are some examples:
-
-- ``Root()``
-- ``Slice(start=0, end=None, step=None)``
-- ``Fields('foo', 'bar')``
-- ``Index(42)``
-- ``Child(Fields('foo'), Index(42))``
-- ``Where(Slice(), Fields('subfield'))``
-- ``Descendants(jsonpath, jsonpath)``
-
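-A minimal sketch of composing these directly (illustrative data):
-
-.. code:: python
-
-    >>> from jsonpath_ng.jsonpath import Child, Fields, Index
-
-    >>> expr = Child(Child(Fields('foo'), Index(0)), Fields('baz'))
-    >>> str(expr)  # round-trips to the concrete syntax
-    'foo.[0].baz'
-    >>> [m.value for m in expr.find({'foo': [{'baz': 1}, {'baz': 2}]})]
-    [1]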
-
-Extras
-------
-
-- *Path data*: The result of ``JsonPath.find`` provides detailed context
- and path data so it is easy to traverse to parent objects, print full
- paths to pieces of data, and generate automatic ids.
-- *Automatic Ids*: If you set ``jsonpath_ng.auto_id_field`` to a value
- other than None, then for any piece of data missing that field, it
- will be replaced by the JSONPath to it, giving automatic unique ids
- to any piece of data. These ids will take into account any ids
- already present as well.
-- *Named operators*: Instead of using ``@`` to reference the current
-  object, this library uses ```this```. In general, any string
-  contained in backquotes can be made into a new operator, currently
-  by extending the library.
-
-
-Extensions
-----------
-
-+--------------+----------------------------------------------+
-| name | Example |
-+==============+==============================================+
-| len | - $.objects.`len` |
-+--------------+----------------------------------------------+
-| sub | - $.field.`sub(/foo\\\\+(.*)/, \\\\1)` |
-+--------------+----------------------------------------------+
-| split | - $.field.`split(+, 2, -1)` |
-| | - $.field.`split(sep, segment, maxsplit)` |
-+--------------+----------------------------------------------+
-| sorted | - $.objects.`sorted` |
-| | - $.objects[\\some_field] |
-| | - $.objects[\\some_field,/other_field] |
-+--------------+----------------------------------------------+
-| filter | - $.objects[?(@some_field > 5)] |
-| | - $.objects[?some_field = "foobar"] |
-| | - $.objects[?some_field =~ "foobar"] |
-| | - $.objects[?some_field > 5 & other < 2] |
-+--------------+----------------------------------------------+
-| arithmetic | - $.foo + "_" + $.bar |
-| (-+*/) | - $.foo * 12 |
-| | - $.objects[*].cow + $.objects[*].cat |
-+--------------+----------------------------------------------+
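-
-A short sketch exercising a few of these extensions (illustrative data;
-these require the extended parser from ``jsonpath_ng.ext``):
-
-.. code:: python
-
-    >>> from jsonpath_ng.ext import parse
-
-    >>> data = {'objects': [{'cow': 2}, {'cow': 4}, {'cow': 6}]}
-
-    >>> [m.value for m in parse('$.objects.`len`').find(data)]
-    [3]
-
-    >>> [m.value for m in parse('$.objects[?cow > 3]').find(data)]
-    [{'cow': 4}, {'cow': 6}]
-
-    >>> [m.value for m in parse('$.name.`split(_, 1, -1)`').find({'name': 'foo_bar'})]
-    ['bar']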
-
-About arithmetic and string
----------------------------
-
-Operations are done with Python operators on the types that Python
-allows, and return [] if the operation cannot be done due to incompatible types.
-
-When operators are used, a jsonpath must be fully qualified; otherwise
-jsonpath-rw-ext can't know whether the expression is a string or a jsonpath field,
-and in that case it will choose string as the type.
-
-Example with data::
-
- {
- 'cow': 'foo',
- 'fish': 'bar'
- }
-
-| **cow + fish** returns **cowfish**
-| **$.cow + $.fish** returns **foobar**
-| **$.cow + "_" + $.fish** returns **foo_bar**
-| **$.cow + "_" + fish** returns **foo_fish**
-
-About arithmetic and list
--------------------------
-
-Arithmetic can be used against two lists if they have the same size.
-
-Example with data::
-
- {'objects': [
- {'cow': 2, 'cat': 3},
- {'cow': 4, 'cat': 6}
- ]}
-
-| **$.objects[\*].cow + $.objects[\*].cat** returns **[5, 10]**
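-
-The same examples in runnable form (a small sketch using the extended
-parser from ``jsonpath_ng.ext``):
-
-.. code:: python
-
-    >>> from jsonpath_ng.ext import parse
-
-    >>> [m.value for m in parse('$.cow + "_" + $.fish').find({'cow': 'foo', 'fish': 'bar'})]
-    ['foo_bar']
-
-    >>> data = {'objects': [{'cow': 2, 'cat': 3}, {'cow': 4, 'cat': 6}]}
-    >>> [m.value for m in parse('$.objects[*].cow + $.objects[*].cat').find(data)]
-    [5, 10]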
-
-More to explore
----------------
-
-There are way too many JSONPath implementations out there to discuss.
-Some are robust, some are toy projects that still work fine, some are
-exercises. There will undoubtedly be many more. This one is made for use
-in released, maintained code, and in particular for programmatic access
-to the abstract syntax and extension. But JSONPath at its simplest just
-isn't that complicated, so you can probably use any of them
-successfully. Why not this one?
-
-The original proposal, as far as I know:
-
-- `JSONPath - XPath for
-  JSON <http://goessner.net/articles/JsonPath/>`__ by Stefan Goessner.
-
-Other examples
---------------
-
-Loading JSON data from a file
-
-.. code:: python
-
- import json
- d = json.loads('{"foo": [{"baz": 1}, {"baz": 2}]}')
- # or
- with open('myfile.json') as f:
- d = json.load(f)
-
-Special note about PLY and docstrings
--------------------------------------
-
-The main parsing toolkit underlying this library,
-`PLY <http://www.dabeaz.com/ply/>`__, does not work with docstrings
-removed. For example, ``PYTHONOPTIMIZE=2`` and ``python -OO`` will both
-cause a failure.
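-
-For instance, the following (hypothetical) invocation fails at parse
-time rather than at import, because PLY relies on the docstrings that
-``-OO`` strips:
-
-.. code:: bash
-
-    $ python -OO -c "from jsonpath_ng import parse; parse('foo')"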
-
-Contributors
-------------
-
-This package is authored and maintained by:
-
-- `Kenn Knowles `__
- (`@kennknowles `__)
-- `Tomas Aparicio `
-
-with the help of patches submitted by `these contributors `__.
-
-Copyright and License
----------------------
-
-Copyright 2013 - Kenneth Knowles
-
-Copyright 2017 - Tomas Aparicio
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may
-not use this file except in compliance with the License. You may obtain
-a copy of the License at
-
-::
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-.. _`JSONPath proposal`: http://goessner.net/articles/JsonPath/
-.. _`jsonpath-rw`: https://github.com/kennknowles/python-jsonpath-rw
-.. _`jsonpath-rw-ext`: https://pypi.python.org/pypi/jsonpath-rw-ext/
-
-.. |PyPi downloads| image:: https://pypip.in/d/jsonpath-ng/badge.png
- :target: https://pypi.python.org/pypi/jsonpath-ng
-.. |Build Status| image:: https://travis-ci.org/h2non/jsonpath-ng.svg?branch=master
- :target: https://travis-ci.org/h2non/jsonpath-ng
-.. |PyPI| image:: https://img.shields.io/pypi/v/jsonpath-ng.svg?maxAge=2592000?style=flat-square
- :target: https://pypi.python.org/pypi/jsonpath-ng
-.. |Documentation Status| image:: https://img.shields.io/badge/docs-latest-green.svg?style=flat
- :target: http://jsonpath-ng.readthedocs.io/en/latest/?badge=latest
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/RECORD
deleted file mode 100644
index ab5a15f2..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/RECORD
+++ /dev/null
@@ -1,21 +0,0 @@
-../../bin/jsonpath_ng,sha256=wyLTfFPfO_g7cOZuZ25Mq6trR-K89aSBqA5id8661_4,237
-jsonpath_ng-1.5.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-jsonpath_ng-1.5.3.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-jsonpath_ng-1.5.3.dist-info/METADATA,sha256=8cxTm5p2jfIj_Putjx_C0nJjDVUymlnc5M2RLx_45XY,16680
-jsonpath_ng-1.5.3.dist-info/RECORD,,
-jsonpath_ng-1.5.3.dist-info/WHEEL,sha256=EVRjI69F5qVjm_YgqcTXPnTAv3BfSUr0WVAHuSP3Xoo,92
-jsonpath_ng-1.5.3.dist-info/entry_points.txt,sha256=X7ZlkFvz1kYSNtz7hXOsRc5L2o3SOCAV0f7IBViZkGQ,70
-jsonpath_ng-1.5.3.dist-info/top_level.txt,sha256=SeYdUWfJ4KSDQbd2GnE6BOd8vMG7Lta9nbIfT2N7xbE,12
-jsonpath_ng/__init__.py,sha256=0jf4O-SfwHQMXtwxleRZRSnRFPNnexPoJc6UDjUk6YQ,116
-jsonpath_ng/bin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jsonpath_ng/bin/jsonpath.py,sha256=oqMf4yidMF9WoAC22ko3h8K8-jhkarP8wpp52DKmWmc,2151
-jsonpath_ng/exceptions.py,sha256=WbmwVjhCtpqp0enN3Sd4ymlZGP8ZZUkvT9uq7PXiEq4,146
-jsonpath_ng/ext/__init__.py,sha256=oxAHiz1-xcRsDX_KGDCiBh6LGP2zHZKzvI3QxrFTh6E,605
-jsonpath_ng/ext/arithmetic.py,sha256=CvRF0dnFWu7V1v2XrQBjymPJGrxYWIr4ff4efhQQOhE,2381
-jsonpath_ng/ext/filter.py,sha256=gskkbrW9gjYOGQm1BCpxcniweqpbL_YyeB5padjmxM4,3935
-jsonpath_ng/ext/iterable.py,sha256=OIBuVDGbK4Igvd2rZUeiG3UoNIdf4oaLEELOSDNnYZY,2984
-jsonpath_ng/ext/parser.py,sha256=zeqaWrLDTr4kIP-RrPaBZtZqnf6t27fHfIVjwCiS0RU,5286
-jsonpath_ng/ext/string.py,sha256=tc31syR7ctJ-LNGslnO_L4-UyvwgMr7VoEQl_142AGs,3258
-jsonpath_ng/jsonpath.py,sha256=Hzfi1IBkKhqnEj-g4wlLuc1Tz_0bQnN5gsPBLTTQiy4,25078
-jsonpath_ng/lexer.py,sha256=UuZTp0-Kp6J3ebIzNxMcjf7NueH-kE966fAYI7R1Es0,5333
-jsonpath_ng/parser.py,sha256=iN_IEUou2WB6wgiZULub7VzkN6CzQpCmYjKeC22uFSU,5842
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/WHEEL
deleted file mode 100644
index 83ff02e9..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.35.1)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/entry_points.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/entry_points.txt
deleted file mode 100644
index 105e6040..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/entry_points.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-[console_scripts]
-jsonpath_ng = jsonpath_ng.bin.jsonpath:entry_point
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/top_level.txt
deleted file mode 100644
index 30b75c56..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng-1.5.3.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-jsonpath_ng
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/__init__.py
deleted file mode 100644
index b19fd11e..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .jsonpath import * # noqa
-from .parser import parse # noqa
-
-
-# Current package version
-__version__ = '1.5.3'
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/bin/jsonpath.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/bin/jsonpath.py
deleted file mode 100644
index 0c365e91..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/bin/jsonpath.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/python
-# encoding: utf-8
-# Copyright © 2012 Felix Richter
-# This work is free. You can redistribute it and/or modify it under the
-# terms of the Do What The Fuck You Want To Public License, Version 2,
-# as published by Sam Hocevar. See the COPYING file for more details.
-
-# Use modern Python
-from __future__ import unicode_literals, print_function, absolute_import
-
-# Standard Library imports
-import json
-import sys
-import glob
-import argparse
-
-# JsonPath-RW imports
-from jsonpath_ng import parse
-
-def find_matches_for_file(expr, f):
- return expr.find(json.load(f))
-
-def print_matches(matches):
- print('\n'.join(['{0}'.format(match.value) for match in matches]))
-
-
-def main(*argv):
- parser = argparse.ArgumentParser(
- description='Search JSON files (or stdin) according to a JSONPath expression.',
- formatter_class=argparse.RawTextHelpFormatter,
- epilog="""
- Quick JSONPath reference (see more at https://github.com/kennknowles/python-jsonpath-rw)
-
- atomics:
- $ - root object
- `this` - current object
-
- operators:
- path1.path2 - same as xpath /
- path1|path2 - union
- path1..path2 - somewhere in between
-
- fields:
- fieldname - field with name
- * - any field
- [_start_?:_end_?] - array slice
- [*] - any array index
- """)
-
-
-
- parser.add_argument('expression', help='A JSONPath expression.')
- parser.add_argument('files', metavar='file', nargs='*', help='Files to search (if none, searches stdin)')
-
- args = parser.parse_args(argv[1:])
-
- expr = parse(args.expression)
- glob_patterns = args.files
-
- if len(glob_patterns) == 0:
- # stdin mode
- print_matches(find_matches_for_file(expr, sys.stdin))
- else:
- # file paths mode
- for pattern in glob_patterns:
- for filename in glob.glob(pattern):
- with open(filename) as f:
- print_matches(find_matches_for_file(expr, f))
-
-def entry_point():
- main(*sys.argv)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/exceptions.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/exceptions.py
deleted file mode 100644
index a592d83c..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/exceptions.py
+++ /dev/null
@@ -1,10 +0,0 @@
-class JSONPathError(Exception):
- pass
-
-
-class JsonPathLexerError(JSONPathError):
- pass
-
-
-class JsonPathParserError(JSONPathError):
- pass
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/__init__.py
deleted file mode 100644
index 1e5c325f..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from .parser import parse # noqa
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/arithmetic.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/arithmetic.py
deleted file mode 100644
index 0ba9e0ca..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/arithmetic.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import operator
-from .. import JSONPath, DatumInContext
-
-
-OPERATOR_MAP = {
- '+': operator.add,
- '-': operator.sub,
- '*': operator.mul,
- '/': operator.truediv,
-}
-
-
-class Operation(JSONPath):
- def __init__(self, left, op, right):
- self.left = left
- self.op = OPERATOR_MAP[op]
- self.right = right
-
- def find(self, datum):
- result = []
- if (isinstance(self.left, JSONPath)
- and isinstance(self.right, JSONPath)):
- left = self.left.find(datum)
- right = self.right.find(datum)
- if left and right and len(left) == len(right):
- for l, r in zip(left, right):
- try:
- result.append(self.op(l.value, r.value))
- except TypeError:
- return []
- else:
- return []
- elif isinstance(self.left, JSONPath):
- left = self.left.find(datum)
- for l in left:
- try:
- result.append(self.op(l.value, self.right))
- except TypeError:
- return []
- elif isinstance(self.right, JSONPath):
- right = self.right.find(datum)
- for r in right:
- try:
- result.append(self.op(self.left, r.value))
- except TypeError:
- return []
- else:
- try:
- result.append(self.op(self.left, self.right))
- except TypeError:
- return []
- return [DatumInContext.wrap(r) for r in result]
-
- def __repr__(self):
- return '%s(%r%s%r)' % (self.__class__.__name__, self.left, self.op,
- self.right)
-
- def __str__(self):
- return '%s%s%s' % (self.left, self.op, self.right)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/filter.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/filter.py
deleted file mode 100644
index d82f0044..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/filter.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import operator
-import re
-from six import moves
-
-from .. import JSONPath, DatumInContext, Index
-
-
-OPERATOR_MAP = {
- '!=': operator.ne,
- '==': operator.eq,
- '=': operator.eq,
- '<=': operator.le,
- '<': operator.lt,
- '>=': operator.ge,
- '>': operator.gt,
- '=~': lambda a, b: True if re.search(b, a) else False,
-}
-
-
-class Filter(JSONPath):
- """The JSONQuery filter"""
-
- def __init__(self, expressions):
- self.expressions = expressions
-
- def find(self, datum):
- if not self.expressions:
- return datum
-
- datum = DatumInContext.wrap(datum)
-
- if isinstance(datum.value, dict):
- datum.value = list(datum.value.values())
-
- if not isinstance(datum.value, list):
- return []
-
- return [DatumInContext(datum.value[i], path=Index(i), context=datum)
- for i in moves.range(0, len(datum.value))
- if (len(self.expressions) ==
- len(list(filter(lambda x: x.find(datum.value[i]),
- self.expressions))))]
-
- def update(self, data, val):
- if type(data) is list:
- for index, item in enumerate(data):
- shouldUpdate = len(self.expressions) == len(list(filter(lambda x: x.find(item), self.expressions)))
- if shouldUpdate:
- if hasattr(val, '__call__'):
- val.__call__(data[index], data, index)
- else:
- data[index] = val
- return data
-
- def __repr__(self):
- return '%s(%r)' % (self.__class__.__name__, self.expressions)
-
- def __str__(self):
- return '[?%s]' % self.expressions
-
- def __eq__(self, other):
- return (isinstance(other, Filter)
- and self.expressions == other.expressions)
-
-
-class Expression(JSONPath):
- """The JSONQuery expression"""
-
- def __init__(self, target, op, value):
- self.target = target
- self.op = op
- self.value = value
-
- def find(self, datum):
- datum = self.target.find(DatumInContext.wrap(datum))
-
- if not datum:
- return []
- if self.op is None:
- return datum
-
- found = []
- for data in datum:
- value = data.value
- if isinstance(self.value, int):
- try:
- value = int(value)
- except ValueError:
- continue
- elif isinstance(self.value, bool):
- try:
- value = bool(value)
- except ValueError:
- continue
-
- if OPERATOR_MAP[self.op](value, self.value):
- found.append(data)
-
- return found
-
- def __eq__(self, other):
- return (isinstance(other, Expression) and
- self.target == other.target and
- self.op == other.op and
- self.value == other.value)
-
- def __repr__(self):
- if self.op is None:
- return '%s(%r)' % (self.__class__.__name__, self.target)
- else:
- return '%s(%r %s %r)' % (self.__class__.__name__,
- self.target, self.op, self.value)
-
- def __str__(self):
- if self.op is None:
- return '%s' % self.target
- else:
- return '%s %s %s' % (self.target, self.op, self.value)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/iterable.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/iterable.py
deleted file mode 100644
index 92ece5f1..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/iterable.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-from .. import This, DatumInContext, JSONPath
-
-
-class SortedThis(This):
- """The JSONPath referring to the sorted version of the current object.
-
- Concrete syntax is '`sorted`' or [\\field,/field].
- """
- def __init__(self, expressions=None):
- self.expressions = expressions
-
- def _compare(self, left, right):
- left = DatumInContext.wrap(left)
- right = DatumInContext.wrap(right)
-
- for expr in self.expressions:
- field, reverse = expr
- l_datum = field.find(left)
- r_datum = field.find(right)
- if (not l_datum or not r_datum or
- len(l_datum) > 1 or len(r_datum) > 1 or
- l_datum[0].value == r_datum[0].value):
-                # NOTE(sileht): should we do something if the expression
-                # matches multiple fields? For now, ignore them.
- continue
- elif l_datum[0].value < r_datum[0].value:
- return 1 if reverse else -1
- else:
- return -1 if reverse else 1
- return 0
-
- def find(self, datum):
- """Return sorted value of This if list or dict."""
- if isinstance(datum.value, dict) and self.expressions:
- return datum
-
- if isinstance(datum.value, dict) or isinstance(datum.value, list):
- key = (functools.cmp_to_key(self._compare)
- if self.expressions else None)
- return [DatumInContext.wrap(
- [value for value in sorted(datum.value, key=key)])]
- return datum
-
- def __eq__(self, other):
-        return (isinstance(other, SortedThis)
-                and self.expressions == other.expressions)
-
- def __repr__(self):
- return '%s(%r)' % (self.__class__.__name__, self.expressions)
-
- def __str__(self):
- return '[?%s]' % self.expressions
-
-
-class Len(JSONPath):
- """The JSONPath referring to the len of the current object.
-
- Concrete syntax is '`len`'.
- """
-
- def find(self, datum):
- datum = DatumInContext.wrap(datum)
- try:
- value = len(datum.value)
- except TypeError:
- return []
- else:
- return [DatumInContext(value,
- context=None,
- path=Len())]
-
- def __eq__(self, other):
- return isinstance(other, Len)
-
- def __str__(self):
- return '`len`'
-
- def __repr__(self):
- return 'Len()'
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/parser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/parser.py
deleted file mode 100644
index 74459210..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/parser.py
+++ /dev/null
@@ -1,172 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from .. import lexer
-from .. import parser
-from .. import Fields, This, Child
-
-from . import arithmetic as _arithmetic
-from . import filter as _filter
-from . import iterable as _iterable
-from . import string as _string
-
-
-class ExtendedJsonPathLexer(lexer.JsonPathLexer):
- """Custom LALR-lexer for JsonPath"""
- literals = lexer.JsonPathLexer.literals + ['?', '@', '+', '*', '/', '-']
- tokens = (['BOOL'] +
- parser.JsonPathLexer.tokens +
- ['FILTER_OP', 'SORT_DIRECTION', 'FLOAT'])
-
- t_FILTER_OP = r'=~|==?|<=|>=|!=|<|>'
-
- def t_BOOL(self, t):
- r'true|false'
- t.value = True if t.value == 'true' else False
- return t
-
- def t_SORT_DIRECTION(self, t):
- r',?\s*(/|\\)'
- t.value = t.value[-1]
- return t
-
- def t_ID(self, t):
- r'@?[a-zA-Z_][a-zA-Z0-9_@\-]*'
- # NOTE(sileht): This fixes the ID expression to be
- # able to use @ for `This` like any json query
- t.type = self.reserved_words.get(t.value, 'ID')
- return t
-
- def t_FLOAT(self, t):
- r'-?\d+\.\d+'
- t.value = float(t.value)
- return t
-
-
-class ExtentedJsonPathParser(parser.JsonPathParser):
- """Custom LALR-parser for JsonPath"""
-
- tokens = ExtendedJsonPathLexer.tokens
-
- def __init__(self, debug=False, lexer_class=None):
- lexer_class = lexer_class or ExtendedJsonPathLexer
- super(ExtentedJsonPathParser, self).__init__(debug, lexer_class)
-
- def p_jsonpath_operator_jsonpath(self, p):
- """jsonpath : NUMBER operator NUMBER
- | FLOAT operator FLOAT
- | ID operator ID
- | NUMBER operator jsonpath
- | FLOAT operator jsonpath
- | jsonpath operator NUMBER
- | jsonpath operator FLOAT
- | jsonpath operator jsonpath
- """
-
-        # NOTE(sileht): If we have a choice between a field and a string we
-        # always choose string, because a field can be fully qualified
-        # like $.foo == foo, whereas a string can't.
- for i in [1, 3]:
- if (isinstance(p[i], Fields) and len(p[i].fields) == 1): # noqa
- p[i] = p[i].fields[0]
-
- p[0] = _arithmetic.Operation(p[1], p[2], p[3])
-
- def p_operator(self, p):
- """operator : '+'
- | '-'
- | '*'
- | '/'
- """
- p[0] = p[1]
-
- def p_jsonpath_named_operator(self, p):
- "jsonpath : NAMED_OPERATOR"
- if p[1] == 'len':
- p[0] = _iterable.Len()
- elif p[1] == 'sorted':
- p[0] = _iterable.SortedThis()
- elif p[1].startswith("split("):
- p[0] = _string.Split(p[1])
- elif p[1].startswith("sub("):
- p[0] = _string.Sub(p[1])
- elif p[1].startswith("str("):
- p[0] = _string.Str(p[1])
- else:
- super(ExtentedJsonPathParser, self).p_jsonpath_named_operator(p)
-
- def p_expression(self, p):
- """expression : jsonpath
- | jsonpath FILTER_OP ID
- | jsonpath FILTER_OP FLOAT
- | jsonpath FILTER_OP NUMBER
- | jsonpath FILTER_OP BOOL
- """
- if len(p) == 2:
- left, op, right = p[1], None, None
- else:
- __, left, op, right = p
- p[0] = _filter.Expression(left, op, right)
-
- def p_expressions_expression(self, p):
- "expressions : expression"
- p[0] = [p[1]]
-
- def p_expressions_and(self, p):
- "expressions : expressions '&' expressions"
- # TODO(sileht): implements '|'
- p[0] = p[1] + p[3]
-
- def p_expressions_parens(self, p):
- "expressions : '(' expressions ')'"
- p[0] = p[2]
-
- def p_filter(self, p):
- "filter : '?' expressions "
- p[0] = _filter.Filter(p[2])
-
- def p_jsonpath_filter(self, p):
- "jsonpath : jsonpath '[' filter ']'"
- p[0] = Child(p[1], p[3])
-
- def p_sort(self, p):
- "sort : SORT_DIRECTION jsonpath"
- p[0] = (p[2], p[1] != "/")
-
- def p_sorts_sort(self, p):
- "sorts : sort"
- p[0] = [p[1]]
-
- def p_sorts_comma(self, p):
- "sorts : sorts sorts"
- p[0] = p[1] + p[2]
-
- def p_jsonpath_sort(self, p):
- "jsonpath : jsonpath '[' sorts ']'"
- sort = _iterable.SortedThis(p[3])
- p[0] = Child(p[1], sort)
-
- def p_jsonpath_this(self, p):
- "jsonpath : '@'"
- p[0] = This()
-
- precedence = [
- ('left', '+', '-'),
- ('left', '*', '/'),
- ] + parser.JsonPathParser.precedence + [
- ('nonassoc', 'ID'),
- ]
-
-
-def parse(path, debug=False):
- return ExtentedJsonPathParser(debug=debug).parse(path)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/string.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/string.py
deleted file mode 100644
index 80ed890a..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/ext/string.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-from .. import DatumInContext, This
-
-
-SUB = re.compile(r"sub\(/(.*)/,\s+(.*)\)")
-SPLIT = re.compile(r"split\((.),\s+(\d+),\s+(\d+|-1)\)")
-STR = re.compile(r"str\(\)")
-
-
-class DefintionInvalid(Exception):
- pass
-
-
-class Sub(This):
- """Regex substituor
-
- Concrete syntax is '`sub(/regex/, repl)`'
- """
-
- def __init__(self, method=None):
- m = SUB.match(method)
- if m is None:
- raise DefintionInvalid("%s is not valid" % method)
- self.expr = m.group(1).strip()
- self.repl = m.group(2).strip()
- self.regex = re.compile(self.expr)
- self.method = method
-
- def find(self, datum):
- datum = DatumInContext.wrap(datum)
- value = self.regex.sub(self.repl, datum.value)
- if value == datum.value:
- return []
- else:
- return [DatumInContext.wrap(value)]
-
- def __eq__(self, other):
- return (isinstance(other, Sub) and self.method == other.method)
-
- def __repr__(self):
- return '%s(%r)' % (self.__class__.__name__, self.method)
-
- def __str__(self):
- return '`sub(/%s/, %s)`' % (self.expr, self.repl)
-
-
-class Split(This):
- """String splitter
-
- Concrete syntax is '`split(char, segment, max_split)`'
- """
-
- def __init__(self, method=None):
- m = SPLIT.match(method)
- if m is None:
- raise DefintionInvalid("%s is not valid" % method)
- self.char = m.group(1)
- self.segment = int(m.group(2))
- self.max_split = int(m.group(3))
- self.method = method
-
- def find(self, datum):
- datum = DatumInContext.wrap(datum)
- try:
- value = datum.value.split(self.char, self.max_split)[self.segment]
- except Exception:
- return []
- return [DatumInContext.wrap(value)]
-
- def __eq__(self, other):
- return (isinstance(other, Split) and self.method == other.method)
-
- def __repr__(self):
- return '%s(%r)' % (self.__class__.__name__, self.method)
-
- def __str__(self):
- return '`%s`' % self.method
-
-
-class Str(This):
- """String converter
-
- Concrete syntax is '`str()`'
- """
-
- def __init__(self, method=None):
- m = STR.match(method)
- if m is None:
- raise DefintionInvalid("%s is not valid" % method)
- self.method = method
-
- def find(self, datum):
- datum = DatumInContext.wrap(datum)
- value = str(datum.value)
- return [DatumInContext.wrap(value)]
-
- def __eq__(self, other):
- return (isinstance(other, Str) and self.method == other.method)
-
- def __repr__(self):
- return '%s(%r)' % (self.__class__.__name__, self.method)
-
- def __str__(self):
- return '`str()`'
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/jsonpath.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/jsonpath.py
deleted file mode 100644
index f4f9d4b6..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/jsonpath.py
+++ /dev/null
@@ -1,771 +0,0 @@
-from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
-import logging
-import six
-from six.moves import xrange
-from itertools import * # noqa
-from .exceptions import JSONPathError
-
-# Get logger name
-logger = logging.getLogger(__name__)
-
-# Turn on/off the automatic creation of id attributes
-# ... could be a kwarg pervasively but uses are rare and simple today
-auto_id_field = None
-
-NOT_SET = object()
-LIST_KEY = object()
-
-
-class JSONPath(object):
- """
- The base class for JSONPath abstract syntax; those
- methods stubbed here are the interface to supported
- JSONPath semantics.
- """
-
- def find(self, data):
- """
- All `JSONPath` types support `find()`, which returns an iterable of `DatumInContext`s.
- They keep track of the path followed to the current location, so if the calling code
- has some opinion about that, it can be passed in here as a starting point.
- """
- raise NotImplementedError()
-
- def find_or_create(self, data):
- return self.find(data)
-
- def update(self, data, val):
- """
- Returns `data` with the specified path replaced by `val`. Only updates
- if the specified path exists.
- """
-
- raise NotImplementedError()
-
- def update_or_create(self, data, val):
- return self.update(data, val)
-
- def filter(self, fn, data):
- """
- Returns `data` with the specified path filtering nodes according
- the filter evaluation result returned by the filter function.
-
- Arguments:
- fn (function): unary function that accepts one argument
- and returns bool.
- data (dict|list|tuple): JSON object to filter.
- """
-
- raise NotImplementedError()
-
- def child(self, child):
- """
- Equivalent to Child(self, next) but with some canonicalization
- """
- if isinstance(self, This) or isinstance(self, Root):
- return child
- elif isinstance(child, This):
- return self
- elif isinstance(child, Root):
- return child
- else:
- return Child(self, child)
-
- def make_datum(self, value):
- if isinstance(value, DatumInContext):
- return value
- else:
- return DatumInContext(value, path=Root(), context=None)
-
-
-class DatumInContext(object):
- """
- Represents a datum along a path from a context.
-
- Essentially a zipper but with a structure represented by JsonPath,
- and where the context is more of a parent pointer than a proper
- representation of the context.
-
- For quick-and-dirty work, this proxies any non-special attributes
- to the underlying datum, but the actual datum can (and usually should)
- be retrieved via the `value` attribute.
-
- To place `datum` within another, use `datum.in_context(context=..., path=...)`
- which extends the path. If the datum already has a context, it places the entire
- context within that passed in, so an object can be built from the inside
- out.
- """
- @classmethod
- def wrap(cls, data):
- if isinstance(data, cls):
- return data
- else:
- return cls(data)
-
- def __init__(self, value, path=None, context=None):
- self.value = value
- self.path = path or This()
- self.context = None if context is None else DatumInContext.wrap(context)
-
- def in_context(self, context, path):
- context = DatumInContext.wrap(context)
-
- if self.context:
- return DatumInContext(value=self.value, path=self.path, context=context.in_context(path=path, context=context))
- else:
- return DatumInContext(value=self.value, path=path, context=context)
-
- @property
- def full_path(self):
- return self.path if self.context is None else self.context.full_path.child(self.path)
-
- @property
- def id_pseudopath(self):
- """
- Looks like a path, but with ids stuck in when available
- """
- try:
- pseudopath = Fields(str(self.value[auto_id_field]))
- except (TypeError, AttributeError, KeyError): # This may not be all the interesting exceptions
- pseudopath = self.path
-
- if self.context:
- return self.context.id_pseudopath.child(pseudopath)
- else:
- return pseudopath
-
- def __repr__(self):
- return '%s(value=%r, path=%r, context=%r)' % (self.__class__.__name__, self.value, self.path, self.context)
-
- def __eq__(self, other):
- return isinstance(other, DatumInContext) and other.value == self.value and other.path == self.path and self.context == other.context
-
-
-class AutoIdForDatum(DatumInContext):
- """
- This behaves like a DatumInContext, but the value is
- always the path leading up to it, not including the "id",
- and with any "id" fields along the way replacing the prior
- segment of the path
-
- For example, it will make "foo.bar.id" return a datum
- that behaves like DatumInContext(value="foo.bar", path="foo.bar.id").
-
- This is disabled by default; it can be turned on by
-    setting the `auto_id_field` global to a value other
- than `None`.
- """
-
- def __init__(self, datum, id_field=None):
- """
- Invariant is that datum.path is the path from context to datum. The auto id
- will either be the id in the datum (if present) or the id of the context
- followed by the path to the datum.
-
- The path to this datum is always the path to the context, the path to the
- datum, and then the auto id field.
- """
- self.datum = datum
- self.id_field = id_field or auto_id_field
-
- @property
- def value(self):
- return str(self.datum.id_pseudopath)
-
- @property
- def path(self):
- return self.id_field
-
- @property
- def context(self):
- return self.datum
-
- def __repr__(self):
- return '%s(%r)' % (self.__class__.__name__, self.datum)
-
- def in_context(self, context, path):
- return AutoIdForDatum(self.datum.in_context(context=context, path=path))
-
- def __eq__(self, other):
- return isinstance(other, AutoIdForDatum) and other.datum == self.datum and self.id_field == other.id_field
-
-
-class Root(JSONPath):
- """
- The JSONPath referring to the "root" object. Concrete syntax is '$'.
- The root is the topmost datum without any context attached.
- """
-
- def find(self, data):
- if not isinstance(data, DatumInContext):
- return [DatumInContext(data, path=Root(), context=None)]
- else:
- if data.context is None:
- return [DatumInContext(data.value, context=None, path=Root())]
- else:
- return Root().find(data.context)
-
- def update(self, data, val):
- return val
-
- def filter(self, fn, data):
- return data if fn(data) else None
-
- def __str__(self):
- return '$'
-
- def __repr__(self):
- return 'Root()'
-
- def __eq__(self, other):
- return isinstance(other, Root)
-
-
-class This(JSONPath):
- """
- The JSONPath referring to the current datum. Concrete syntax is '@'.
- """
-
- def find(self, datum):
- return [DatumInContext.wrap(datum)]
-
- def update(self, data, val):
- return val
-
- def filter(self, fn, data):
- return data if fn(data) else None
-
- def __str__(self):
- return '`this`'
-
- def __repr__(self):
- return 'This()'
-
- def __eq__(self, other):
- return isinstance(other, This)
-
-
-class Child(JSONPath):
- """
- JSONPath that first matches the left, then the right.
- Concrete syntax is '.'
- """
-
- def __init__(self, left, right):
- self.left = left
- self.right = right
-
- def find(self, datum):
- """
- Extra special case: auto ids do not have children,
- so cut it off right now rather than auto id the auto id
- """
-
- return [submatch
- for subdata in self.left.find(datum)
- if not isinstance(subdata, AutoIdForDatum)
- for submatch in self.right.find(subdata)]
-
- def update(self, data, val):
- for datum in self.left.find(data):
- self.right.update(datum.value, val)
- return data
-
- def find_or_create(self, datum):
- datum = DatumInContext.wrap(datum)
- submatches = []
- for subdata in self.left.find_or_create(datum):
- if isinstance(subdata, AutoIdForDatum):
- # Extra special case: auto ids do not have children,
- # so cut it off right now rather than auto id the auto id
- continue
- for submatch in self.right.find_or_create(subdata):
- submatches.append(submatch)
- return submatches
-
- def update_or_create(self, data, val):
- for datum in self.left.find_or_create(data):
- self.right.update_or_create(datum.value, val)
- return _clean_list_keys(data)
-
- def filter(self, fn, data):
- for datum in self.left.find(data):
- self.right.filter(fn, datum.value)
- return data
-
- def __eq__(self, other):
- return isinstance(other, Child) and self.left == other.left and self.right == other.right
-
- def __str__(self):
- return '%s.%s' % (self.left, self.right)
-
- def __repr__(self):
- return '%s(%r, %r)' % (self.__class__.__name__, self.left, self.right)
-
-
-class Parent(JSONPath):
- """
- JSONPath that matches the parent node of the current match.
- Will crash if no such parent exists.
- Available via named operator `parent`.
- """
-
- def find(self, datum):
- datum = DatumInContext.wrap(datum)
- return [datum.context]
-
- def __eq__(self, other):
- return isinstance(other, Parent)
-
- def __str__(self):
- return '`parent`'
-
- def __repr__(self):
- return 'Parent()'
-
-
-class Where(JSONPath):
- """
- JSONPath that first matches the left, and then
- filters for only those nodes that have
- a match on the right.
-
- WARNING: Subject to change. May want to have "contains"
- or some other better word for it.
- """
-
- def __init__(self, left, right):
- self.left = left
- self.right = right
-
- def find(self, data):
- return [subdata for subdata in self.left.find(data) if self.right.find(subdata)]
-
- def update(self, data, val):
- for datum in self.find(data):
- datum.path.update(data, val)
- return data
-
- def filter(self, fn, data):
- for datum in self.find(data):
- datum.path.filter(fn, datum.value)
- return data
-
- def __str__(self):
- return '%s where %s' % (self.left, self.right)
-
- def __eq__(self, other):
- return isinstance(other, Where) and other.left == self.left and other.right == self.right
-
-class Descendants(JSONPath):
- """
- JSONPath that matches first the left expression then any descendant
- of it which matches the right expression.
- """
-
- def __init__(self, left, right):
- self.left = left
- self.right = right
-
- def find(self, datum):
-        # <left> .. <right> ==> <left> . (<right> | *..<right> | [*]..<right>)
-        #
-        # With a wonky caveat that since Slice() has funky coercions
-        # we cannot just delegate to that equivalence or we'll hit an
-        # infinite loop. So right here we implement the coercion-free version.
-
- # Get all left matches into a list
- left_matches = self.left.find(datum)
- if not isinstance(left_matches, list):
- left_matches = [left_matches]
-
- def match_recursively(datum):
- right_matches = self.right.find(datum)
-
- # Manually do the * or [*] to avoid coercion and recurse just the right-hand pattern
- if isinstance(datum.value, list):
- recursive_matches = [submatch
- for i in range(0, len(datum.value))
- for submatch in match_recursively(DatumInContext(datum.value[i], context=datum, path=Index(i)))]
-
- elif isinstance(datum.value, dict):
- recursive_matches = [submatch
- for field in datum.value.keys()
- for submatch in match_recursively(DatumInContext(datum.value[field], context=datum, path=Fields(field)))]
-
- else:
- recursive_matches = []
-
- return right_matches + list(recursive_matches)
-
- # TODO: repeatable iterator instead of list?
- return [submatch
- for left_match in left_matches
- for submatch in match_recursively(left_match)]
-
- def is_singular(self):
- return False
-
- def update(self, data, val):
- # Get all left matches into a list
- left_matches = self.left.find(data)
- if not isinstance(left_matches, list):
- left_matches = [left_matches]
-
- def update_recursively(data):
- # Update only mutable values corresponding to JSON types
- if not (isinstance(data, list) or isinstance(data, dict)):
- return
-
- self.right.update(data, val)
-
- # Manually do the * or [*] to avoid coercion and recurse just the right-hand pattern
- if isinstance(data, list):
- for i in range(0, len(data)):
- update_recursively(data[i])
-
- elif isinstance(data, dict):
- for field in data.keys():
- update_recursively(data[field])
-
- for submatch in left_matches:
- update_recursively(submatch.value)
-
- return data
-
- def filter(self, fn, data):
- # Get all left matches into a list
- left_matches = self.left.find(data)
- if not isinstance(left_matches, list):
- left_matches = [left_matches]
-
- def filter_recursively(data):
- # Update only mutable values corresponding to JSON types
- if not (isinstance(data, list) or isinstance(data, dict)):
- return
-
- self.right.filter(fn, data)
-
- # Manually do the * or [*] to avoid coercion and recurse just the right-hand pattern
- if isinstance(data, list):
- for i in range(0, len(data)):
- filter_recursively(data[i])
-
- elif isinstance(data, dict):
- for field in data.keys():
- filter_recursively(data[field])
-
- for submatch in left_matches:
- filter_recursively(submatch.value)
-
- return data
-
- def __str__(self):
- return '%s..%s' % (self.left, self.right)
-
- def __eq__(self, other):
- return isinstance(other, Descendants) and self.left == other.left and self.right == other.right
-
- def __repr__(self):
- return '%s(%r, %r)' % (self.__class__.__name__, self.left, self.right)
-
-
-class Union(JSONPath):
- """
- JSONPath that returns the union of the results of each match.
- This is pretty shoddily implemented for now. The nicest semantics
- in case of mismatched bits (list vs atomic) is to put
- them all in a list, but I haven't done that yet.
-
- WARNING: Any appearance of this being the _concatenation_ is
- coincidence. It may even be a bug! (or laziness)
- """
- def __init__(self, left, right):
- self.left = left
- self.right = right
-
- def is_singular(self):
- return False
-
- def find(self, data):
- return self.left.find(data) + self.right.find(data)
-
-class Intersect(JSONPath):
- """
- JSONPath for bits that match *both* patterns.
-
- This can be accomplished a couple of ways. The most
- efficient is to actually build the intersected
- AST as in building a state machine for matching the
- intersection of regular languages. The next
- idea is to build a filtered data and match against
- that.
- """
- def __init__(self, left, right):
- self.left = left
- self.right = right
-
- def is_singular(self):
- return False
-
- def find(self, data):
- raise NotImplementedError()
-
-
-class Fields(JSONPath):
- """
- JSONPath referring to some field of the current object.
-    Concrete syntax is comma-separated field names.
-
- WARNING: If '*' is any of the field names, then they will
- all be returned.
- """
-
- def __init__(self, *fields):
- self.fields = fields
-
- @staticmethod
- def get_field_datum(datum, field, create):
- if field == auto_id_field:
- return AutoIdForDatum(datum)
- try:
- field_value = datum.value.get(field, NOT_SET)
- if field_value is NOT_SET:
- if create:
- datum.value[field] = field_value = {}
- else:
- return None
- return DatumInContext(field_value, path=Fields(field), context=datum)
- except (TypeError, AttributeError):
- return None
-
- def reified_fields(self, datum):
- if '*' not in self.fields:
- return self.fields
- else:
- try:
- fields = tuple(datum.value.keys())
- return fields if auto_id_field is None else fields + (auto_id_field,)
- except AttributeError:
- return ()
-
- def find(self, datum):
- return self._find_base(datum, create=False)
-
- def find_or_create(self, datum):
- return self._find_base(datum, create=True)
-
- def _find_base(self, datum, create):
- datum = DatumInContext.wrap(datum)
- field_data = [self.get_field_datum(datum, field, create)
- for field in self.reified_fields(datum)]
- return [fd for fd in field_data if fd is not None]
-
- def update(self, data, val):
- return self._update_base(data, val, create=False)
-
- def update_or_create(self, data, val):
- return self._update_base(data, val, create=True)
-
- def _update_base(self, data, val, create):
- if data is not None:
- for field in self.reified_fields(DatumInContext.wrap(data)):
- if field not in data and create:
- data[field] = {}
- if field in data:
- if hasattr(val, '__call__'):
- val(data[field], data, field)
- else:
- data[field] = val
- return data
-
- def filter(self, fn, data):
- if data is not None:
- for field in self.reified_fields(DatumInContext.wrap(data)):
- if field in data:
- if fn(data[field]):
- data.pop(field)
- return data
-
- def __str__(self):
- return ','.join(map(str, self.fields))
-
- def __repr__(self):
- return '%s(%s)' % (self.__class__.__name__, ','.join(map(repr, self.fields)))
-
- def __eq__(self, other):
- return isinstance(other, Fields) and tuple(self.fields) == tuple(other.fields)
-
-
-class Index(JSONPath):
- """
- JSONPath that matches indices of the current datum, or none if not large enough.
- Concrete syntax is brackets.
-
- WARNING: If the datum is None or not long enough, it will not crash but will not match anything.
-    NOTE: For the concrete syntax of `[*]`, the abstract syntax is a Slice() with no parameters (equiv to `[:]`)
- """
-
- def __init__(self, index):
- self.index = index
-
- def find(self, datum):
- return self._find_base(datum, create=False)
-
- def find_or_create(self, datum):
- return self._find_base(datum, create=True)
-
- def _find_base(self, datum, create):
- datum = DatumInContext.wrap(datum)
- if create:
- if datum.value == {}:
- datum.value = _create_list_key(datum.value)
- self._pad_value(datum.value)
- if datum.value and len(datum.value) > self.index:
- return [DatumInContext(datum.value[self.index], path=self, context=datum)]
- else:
- return []
-
- def update(self, data, val):
- return self._update_base(data, val, create=False)
-
- def update_or_create(self, data, val):
- return self._update_base(data, val, create=True)
-
- def _update_base(self, data, val, create):
- if create:
- if data == {}:
- data = _create_list_key(data)
- self._pad_value(data)
- if hasattr(val, '__call__'):
- val.__call__(data[self.index], data, self.index)
- elif len(data) > self.index:
- data[self.index] = val
- return data
-
- def filter(self, fn, data):
- if fn(data[self.index]):
- data.pop(self.index) # relies on mutation :(
- return data
-
- def __eq__(self, other):
- return isinstance(other, Index) and self.index == other.index
-
- def __str__(self):
- return '[%i]' % self.index
-
- def __repr__(self):
- return '%s(index=%r)' % (self.__class__.__name__, self.index)
-
- def _pad_value(self, value):
- if len(value) <= self.index:
- pad = self.index - len(value) + 1
- value += [{} for __ in range(pad)]
-
-
-class Slice(JSONPath):
- """
- JSONPath matching a slice of an array.
-
- Because of a mismatch between JSON and XML when schema-unaware,
- this always returns an iterable; if the incoming data
- was not a list, then it returns a one element list _containing_ that
- data.
-
- Consider these two docs, and their schema-unaware translation to JSON:
-
-    <a><b>hello</b></a> ==> {"a": {"b": "hello"}}
-    <a><b>hello</b><b>goodbye</b></a> ==> {"a": {"b": ["hello", "goodbye"]}}
-
- If there were a schema, it would be known that "b" should always be an
- array (unless the schema were wonky, but that is too much to fix here)
- so when querying with JSON if the one writing the JSON knows that it
- should be an array, they can write a slice operator and it will coerce
- a non-array value to an array.
-
- This may be a bit unfortunate because it would be nice to always have
- an iterator, but dictionaries and other objects may also be iterable,
- so this is the compromise.
- """
- def __init__(self, start=None, end=None, step=None):
- self.start = start
- self.end = end
- self.step = step
-
- def find(self, datum):
- datum = DatumInContext.wrap(datum)
-
- # Used for catching null value instead of empty list in path
- if not datum.value:
- return []
- # Here's the hack. If it is a dictionary or some kind of constant,
- # put it in a single-element list
- if (isinstance(datum.value, dict) or isinstance(datum.value, six.integer_types) or isinstance(datum.value, six.string_types)):
- return self.find(DatumInContext([datum.value], path=datum.path, context=datum.context))
-
- # Some iterators do not support slicing but we can still
- # at least work for '*'
- if self.start == None and self.end == None and self.step == None:
- return [DatumInContext(datum.value[i], path=Index(i), context=datum) for i in xrange(0, len(datum.value))]
- else:
- return [DatumInContext(datum.value[i], path=Index(i), context=datum) for i in range(0, len(datum.value))[self.start:self.end:self.step]]
-
- def update(self, data, val):
- for datum in self.find(data):
- datum.path.update(data, val)
- return data
-
- def filter(self, fn, data):
- while True:
- length = len(data)
- for datum in self.find(data):
- data = datum.path.filter(fn, data)
- if len(data) < length:
- break
-
- if length == len(data):
- break
- return data
-
- def __str__(self):
- if self.start == None and self.end == None and self.step == None:
- return '[*]'
- else:
- return '[%s%s%s]' % (self.start or '',
- ':%d'%self.end if self.end else '',
- ':%d'%self.step if self.step else '')
-
- def __repr__(self):
- return '%s(start=%r,end=%r,step=%r)' % (self.__class__.__name__, self.start, self.end, self.step)
-
- def __eq__(self, other):
- return isinstance(other, Slice) and other.start == self.start and self.end == other.end and other.step == self.step
-
-
-def _create_list_key(dict_):
- """
- Adds a list to a dictionary by reference and returns the list.
-
- See `_clean_list_keys()`
- """
- dict_[LIST_KEY] = new_list = [{}]
- return new_list
-
-
-def _clean_list_keys(dict_):
- """
- Replace {LIST_KEY: ['foo', 'bar']} with ['foo', 'bar'].
-
- >>> _clean_list_keys({LIST_KEY: ['foo', 'bar']})
- ['foo', 'bar']
-
- """
- for key, value in dict_.items():
- if isinstance(value, dict):
- dict_[key] = _clean_list_keys(value)
- elif isinstance(value, list):
- dict_[key] = [_clean_list_keys(v) if isinstance(v, dict) else v
- for v in value]
- if LIST_KEY in dict_:
- return dict_[LIST_KEY]
- return dict_
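
The Slice coercion documented above and Index's auto-padding via `_pad_value` are the subtle parts of this deleted module. A minimal sketch of the behavior, assuming the `jsonpath_ng` package is still importable:

    from jsonpath_ng import parse
    from jsonpath_ng.jsonpath import Index

    # Slice coerces a non-array value into a one-element list:
    print([m.value for m in parse('a[*]').find({'a': 'hello'})])  # ['hello']

    # Index.update() only writes when the list is long enough ...
    data = [0, 1]
    Index(1).update(data, 'x')            # data is now [0, 'x']
    # ... while update_or_create() pads shorter lists with empty dicts first:
    Index(4).update_or_create(data, 'y')  # data is now [0, 'x', {}, {}, 'y']
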
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/lexer.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/lexer.py
deleted file mode 100644
index 40149337..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/lexer.py
+++ /dev/null
@@ -1,171 +0,0 @@
-from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
-import sys
-import logging
-
-import ply.lex
-
-from jsonpath_ng.exceptions import JsonPathLexerError
-
-logger = logging.getLogger(__name__)
-
-
-class JsonPathLexer(object):
- '''
- A Lexical analyzer for JsonPath.
- '''
-
- def __init__(self, debug=False):
- self.debug = debug
- if self.__doc__ == None:
- raise JsonPathLexerError('Docstrings have been removed! By design of PLY, jsonpath-rw requires docstrings. You must not use PYTHONOPTIMIZE=2 or python -OO.')
-
- def tokenize(self, string):
- '''
- Maps a string to an iterator over tokens. In other words: [char] -> [token]
- '''
-
- new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
- new_lexer.latest_newline = 0
- new_lexer.string_value = None
- new_lexer.input(string)
-
- while True:
- t = new_lexer.token()
- if t is None: break
- t.col = t.lexpos - new_lexer.latest_newline
- yield t
-
- if new_lexer.string_value is not None:
- raise JsonPathLexerError('Unexpected EOF in string literal or identifier')
-
- # ============== PLY Lexer specification ==================
- #
- # This probably should be private but:
- # - the parser requires access to `tokens` (perhaps they should be defined in a third, shared dependency)
- # - things like `literals` might be a legitimate part of the public interface.
- #
- # Anyhow, it is pythonic to give some rope to hang oneself with :-)
-
- literals = ['*', '.', '[', ']', '(', ')', '$', ',', ':', '|', '&', '~']
-
- reserved_words = { 'where': 'WHERE' }
-
- tokens = ['DOUBLEDOT', 'NUMBER', 'ID', 'NAMED_OPERATOR'] + list(reserved_words.values())
-
- states = [ ('singlequote', 'exclusive'),
- ('doublequote', 'exclusive'),
- ('backquote', 'exclusive') ]
-
- # Normal lexing, rather easy
- t_DOUBLEDOT = r'\.\.'
- t_ignore = ' \t'
-
- def t_ID(self, t):
- r'[a-zA-Z_@][a-zA-Z0-9_@\-]*'
- t.type = self.reserved_words.get(t.value, 'ID')
- return t
-
- def t_NUMBER(self, t):
- r'-?\d+'
- t.value = int(t.value)
- return t
-
-
- # Single-quoted strings
- t_singlequote_ignore = ''
- def t_singlequote(self, t):
- r"'"
- t.lexer.string_start = t.lexer.lexpos
- t.lexer.string_value = ''
- t.lexer.push_state('singlequote')
-
- def t_singlequote_content(self, t):
- r"[^'\\]+"
- t.lexer.string_value += t.value
-
- def t_singlequote_escape(self, t):
- r'\\.'
- t.lexer.string_value += t.value[1]
-
- def t_singlequote_end(self, t):
- r"'"
- t.value = t.lexer.string_value
- t.type = 'ID'
- t.lexer.string_value = None
- t.lexer.pop_state()
- return t
-
- def t_singlequote_error(self, t):
- raise JsonPathLexerError('Error on line %s, col %s while lexing singlequoted field: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
-
-
- # Double-quoted strings
- t_doublequote_ignore = ''
- def t_doublequote(self, t):
- r'"'
- t.lexer.string_start = t.lexer.lexpos
- t.lexer.string_value = ''
- t.lexer.push_state('doublequote')
-
- def t_doublequote_content(self, t):
- r'[^"\\]+'
- t.lexer.string_value += t.value
-
- def t_doublequote_escape(self, t):
- r'\\.'
- t.lexer.string_value += t.value[1]
-
- def t_doublequote_end(self, t):
- r'"'
- t.value = t.lexer.string_value
- t.type = 'ID'
- t.lexer.string_value = None
- t.lexer.pop_state()
- return t
-
- def t_doublequote_error(self, t):
- raise JsonPathLexerError('Error on line %s, col %s while lexing doublequoted field: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
-
-
- # Back-quoted "magic" operators
- t_backquote_ignore = ''
- def t_backquote(self, t):
- r'`'
- t.lexer.string_start = t.lexer.lexpos
- t.lexer.string_value = ''
- t.lexer.push_state('backquote')
-
- def t_backquote_escape(self, t):
- r'\\.'
- t.lexer.string_value += t.value[1]
-
- def t_backquote_content(self, t):
- r"[^`\\]+"
- t.lexer.string_value += t.value
-
- def t_backquote_end(self, t):
- r'`'
- t.value = t.lexer.string_value
- t.type = 'NAMED_OPERATOR'
- t.lexer.string_value = None
- t.lexer.pop_state()
- return t
-
- def t_backquote_error(self, t):
- raise JsonPathLexerError('Error on line %s, col %s while lexing backquoted operator: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
-
-
- # Counting lines, handling errors
- def t_newline(self, t):
- r'\n'
- t.lexer.lineno += 1
- t.lexer.latest_newline = t.lexpos
-
- def t_error(self, t):
- raise JsonPathLexerError('Error on line %s, col %s: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
-
-if __name__ == '__main__':
- logging.basicConfig()
- lexer = JsonPathLexer(debug=True)
- for token in lexer.tokenize(sys.stdin.read()):
- print('%-20s%s' % (token.value, token.type))
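
Before its removal, the lexer could be exercised directly; each token carries a `type`, a `value`, and a computed `col`. A short sketch, assuming `jsonpath_ng` and its `ply` dependency are installed:

    from jsonpath_ng.lexer import JsonPathLexer

    tokens = list(JsonPathLexer().tokenize("foo.'some field'[0]"))
    print([(t.type, t.value) for t in tokens])
    # Roughly: [('ID', 'foo'), ('.', '.'), ('ID', 'some field'),
    #           ('[', '['), ('NUMBER', 0), (']', ']')]
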
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/parser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/parser.py
deleted file mode 100644
index 72333c20..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_ng/parser.py
+++ /dev/null
@@ -1,201 +0,0 @@
-from __future__ import (
- print_function,
- absolute_import,
- division,
- generators,
- nested_scopes,
-)
-import sys
-import os.path
-
-import ply.yacc
-
-from jsonpath_ng.exceptions import JsonPathParserError
-from jsonpath_ng.jsonpath import *
-from jsonpath_ng.lexer import JsonPathLexer
-
-logger = logging.getLogger(__name__)
-
-
-def parse(string):
- return JsonPathParser().parse(string)
-
-
-class JsonPathParser(object):
- '''
- An LALR-parser for JsonPath
- '''
-
- tokens = JsonPathLexer.tokens
-
- def __init__(self, debug=False, lexer_class=None):
- if self.__doc__ is None:
- raise JsonPathParserError(
- 'Docstrings have been removed! By design of PLY, '
- 'jsonpath-rw requires docstrings. You must not use '
- 'PYTHONOPTIMIZE=2 or python -OO.'
- )
-
- self.debug = debug
- self.lexer_class = lexer_class or JsonPathLexer # Crufty but works around statefulness in PLY
-
- def parse(self, string, lexer = None):
- lexer = lexer or self.lexer_class()
- return self.parse_token_stream(lexer.tokenize(string))
-
- def parse_token_stream(self, token_iterator, start_symbol='jsonpath'):
-
- # Since PLY has some crufty aspects and dumps files, we try to keep them local
- # However, we need to derive the name of the output Python file :-/
- output_directory = os.path.dirname(__file__)
- try:
- module_name = os.path.splitext(os.path.split(__file__)[1])[0]
- except:
- module_name = __name__
-
- parsing_table_module = '_'.join([module_name, start_symbol, 'parsetab'])
-
- # And we regenerate the parse table every time; it doesn't actually take that long!
- new_parser = ply.yacc.yacc(module=self,
- debug=self.debug,
- tabmodule = parsing_table_module,
- outputdir = output_directory,
- write_tables=0,
- start = start_symbol,
- errorlog = logger)
-
- return new_parser.parse(lexer = IteratorToTokenStream(token_iterator))
-
- # ===================== PLY Parser specification =====================
-
- precedence = [
- ('left', ','),
- ('left', 'DOUBLEDOT'),
- ('left', '.'),
- ('left', '|'),
- ('left', '&'),
- ('left', 'WHERE'),
- ]
-
- def p_error(self, t):
- raise JsonPathParserError('Parse error at %s:%s near token %s (%s)'
- % (t.lineno, t.col, t.value, t.type))
-
- def p_jsonpath_binop(self, p):
- """jsonpath : jsonpath '.' jsonpath
- | jsonpath DOUBLEDOT jsonpath
- | jsonpath WHERE jsonpath
- | jsonpath '|' jsonpath
- | jsonpath '&' jsonpath"""
- op = p[2]
-
- if op == '.':
- p[0] = Child(p[1], p[3])
- elif op == '..':
- p[0] = Descendants(p[1], p[3])
- elif op == 'where':
- p[0] = Where(p[1], p[3])
- elif op == '|':
- p[0] = Union(p[1], p[3])
- elif op == '&':
- p[0] = Intersect(p[1], p[3])
-
- def p_jsonpath_fields(self, p):
- "jsonpath : fields_or_any"
- p[0] = Fields(*p[1])
-
- def p_jsonpath_named_operator(self, p):
- "jsonpath : NAMED_OPERATOR"
- if p[1] == 'this':
- p[0] = This()
- elif p[1] == 'parent':
- p[0] = Parent()
- else:
- raise JsonPathParserError('Unknown named operator `%s` at %s:%s'
- % (p[1], p.lineno(1), p.lexpos(1)))
-
- def p_jsonpath_root(self, p):
- "jsonpath : '$'"
- p[0] = Root()
-
- def p_jsonpath_idx(self, p):
- "jsonpath : '[' idx ']'"
- p[0] = p[2]
-
- def p_jsonpath_slice(self, p):
- "jsonpath : '[' slice ']'"
- p[0] = p[2]
-
- def p_jsonpath_fieldbrackets(self, p):
- "jsonpath : '[' fields ']'"
- p[0] = Fields(*p[2])
-
- def p_jsonpath_child_fieldbrackets(self, p):
- "jsonpath : jsonpath '[' fields ']'"
- p[0] = Child(p[1], Fields(*p[3]))
-
- def p_jsonpath_child_idxbrackets(self, p):
- "jsonpath : jsonpath '[' idx ']'"
- p[0] = Child(p[1], p[3])
-
- def p_jsonpath_child_slicebrackets(self, p):
- "jsonpath : jsonpath '[' slice ']'"
- p[0] = Child(p[1], p[3])
-
- def p_jsonpath_parens(self, p):
- "jsonpath : '(' jsonpath ')'"
- p[0] = p[2]
-
- # Because fields in brackets cannot be '*' - that is reserved for array indices
- def p_fields_or_any(self, p):
- """fields_or_any : fields
- | '*' """
- if p[1] == '*':
- p[0] = ['*']
- else:
- p[0] = p[1]
-
- def p_fields_id(self, p):
- "fields : ID"
- p[0] = [p[1]]
-
- def p_fields_comma(self, p):
- "fields : fields ',' fields"
- p[0] = p[1] + p[3]
-
- def p_idx(self, p):
- "idx : NUMBER"
- p[0] = Index(p[1])
-
- def p_slice_any(self, p):
- "slice : '*'"
- p[0] = Slice()
-
- def p_slice(self, p): # Currently does not support `step`
- "slice : maybe_int ':' maybe_int"
- p[0] = Slice(start=p[1], end=p[3])
-
- def p_maybe_int(self, p):
- """maybe_int : NUMBER
- | empty"""
- p[0] = p[1]
-
- def p_empty(self, p):
- 'empty :'
- p[0] = None
-
-class IteratorToTokenStream(object):
- def __init__(self, iterator):
- self.iterator = iterator
-
- def token(self):
- try:
- return next(self.iterator)
- except StopIteration:
- return None
-
-
-if __name__ == '__main__':
- logging.basicConfig()
- parser = JsonPathParser(debug=True)
- print(parser.parse(sys.stdin.read()))
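
The parser's output is a first-class AST rather than a match list, and the `p_jsonpath_binop` rule above makes `.` left-associative. A sketch of what that means in practice, assuming `jsonpath_ng` is importable:

    from jsonpath_ng import parse
    from jsonpath_ng.jsonpath import Child, Fields

    expr = parse('foo.bar.baz')
    # '.' is left-associative, so the tree nests to the left:
    assert expr == Child(Child(Fields('foo'), Fields('bar')), Fields('baz'))
    print(str(expr))  # foo.bar.baz
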
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/METADATA
deleted file mode 100644
index 522652fe..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/METADATA
+++ /dev/null
@@ -1,248 +0,0 @@
-Metadata-Version: 2.1
-Name: jsonpath-rw
-Version: 1.4.0
-Summary: A robust and significantly extended implementation of JSONPath for Python, with a clear AST for metaprogramming.
-Home-page: https://github.com/kennknowles/python-jsonpath-rw
-Author: Kenneth Knowles
-Author-email: kenn.knowles@gmail.com
-License: Apache 2.0
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.2
-Classifier: Programming Language :: Python :: 3.3
-Requires-Dist: ply
-Requires-Dist: decorator
-Requires-Dist: six
-
-Python JSONPath RW
-==================
-
-https://github.com/kennknowles/python-jsonpath-rw
-
-|Build Status| |Test coverage| |PyPi version| |PyPi downloads|
-
-This library provides a robust and significantly extended implementation
-of JSONPath for Python. It is tested with Python 2.6, 2.7, 3.2, 3.3.
-*(On travis-ci there is a segfault when running the tests with pypy; I don't think the problem lies with this library)*.
-
-This library differs from other JSONPath implementations in that it is a
-full *language* implementation, meaning the JSONPath expressions are
-first class objects, easy to analyze, transform, parse, print, and
-extend. (You can also execute them :-)
-
-Quick Start
------------
-
-To install, use pip:
-
-::
-
- $ pip install jsonpath-rw
-
-Then:
-
-.. code:: python
-
- $ python
-
- >>> from jsonpath_rw import jsonpath, parse
-
- # A robust parser, not just a regex. (Makes powerful extensions possible; see below)
- >>> jsonpath_expr = parse('foo[*].baz')
-
- # Extracting values is easy
- >>> [match.value for match in jsonpath_expr.find({'foo': [{'baz': 1}, {'baz': 2}]})]
- [1, 2]
-
- # Matches remember where they came from
- >>> [str(match.full_path) for match in jsonpath_expr.find({'foo': [{'baz': 1}, {'baz': 2}]})]
- ['foo.[0].baz', 'foo.[1].baz']
-
- # And this can be useful for automatically providing ids for bits of data that do not have them (currently a global switch)
- >>> jsonpath.auto_id_field = 'id'
- >>> [match.value for match in parse('foo[*].id').find({'foo': [{'id': 'bizzle'}, {'baz': 3}]})]
- ['foo.bizzle', 'foo.[1]']
-
- # A handy extension: named operators like `parent`
- >>> [match.value for match in parse('a.*.b.`parent`.c').find({'a': {'x': {'b': 1, 'c': 'number one'}, 'y': {'b': 2, 'c': 'number two'}}})]
- ['number two', 'number one']
-
- # You can also build expressions directly quite easily
- >>> from jsonpath_rw.jsonpath import Fields
- >>> from jsonpath_rw.jsonpath import Slice
-
- >>> jsonpath_expr_direct = Fields('foo').child(Slice('*')).child(Fields('baz')) # This is equivalent
-
-JSONPath Syntax
----------------
-
-The JSONPath syntax supported by this library includes some additional
-features and omits some problematic features (those that make it
-unportable). In particular, some new operators such as ``|`` and
-``where`` are available, and parentheses are used for grouping not for
-callbacks into Python, since with these changes the language is not
-trivially associative. Also, fields may be quoted whether or not they
-are contained in brackets.
-
-Atomic expressions:
-
-+-----------------------+---------------------------------------------------------------------------------------------+
-| Syntax | Meaning |
-+=======================+=============================================================================================+
-| ``$`` | The root object |
-+-----------------------+---------------------------------------------------------------------------------------------+
-| ```this``` | The "current" object. |
-+-----------------------+---------------------------------------------------------------------------------------------+
-| ```foo``` | More generally, this syntax allows "named operators" to extend JSONPath in arbitrary ways |
-+-----------------------+---------------------------------------------------------------------------------------------+
-| *field* | Specified field(s), described below |
-+-----------------------+---------------------------------------------------------------------------------------------+
-| ``[`` *field* ``]`` | Same as *field* |
-+-----------------------+---------------------------------------------------------------------------------------------+
-| ``[`` *idx* ``]`` | Array access, described below (this is always unambiguous with field access) |
-+-----------------------+---------------------------------------------------------------------------------------------+
-
-Jsonpath operators:
-
-+-------------------------------------+------------------------------------------------------------------------------------+
-| Syntax | Meaning |
-+=====================================+====================================================================================+
-| *jsonpath1* ``.`` *jsonpath2* | All nodes matched by *jsonpath2* starting at any node matching *jsonpath1* |
-+-------------------------------------+------------------------------------------------------------------------------------+
-| *jsonpath* ``[`` *whatever* ``]`` | Same as *jsonpath*\ ``.``\ *whatever* |
-+-------------------------------------+------------------------------------------------------------------------------------+
-| *jsonpath1* ``..`` *jsonpath2* | All nodes matched by *jsonpath2* that descend from any node matching *jsonpath1* |
-+-------------------------------------+------------------------------------------------------------------------------------+
-| *jsonpath1* ``where`` *jsonpath2* | Any nodes matching *jsonpath1* with a child matching *jsonpath2* |
-+-------------------------------------+------------------------------------------------------------------------------------+
-| *jsonpath1* ``|`` *jsonpath2* | Any nodes matching the union of *jsonpath1* and *jsonpath2* |
-+-------------------------------------+------------------------------------------------------------------------------------+
-
-Field specifiers ( *field* ):
-
-+-------------------------+-------------------------------------------------------------------------------------+
-| Syntax | Meaning |
-+=========================+=====================================================================================+
-| ``fieldname`` | the field ``fieldname`` (from the "current" object) |
-+-------------------------+-------------------------------------------------------------------------------------+
-| ``"fieldname"`` | same as above, for allowing special characters in the fieldname |
-+-------------------------+-------------------------------------------------------------------------------------+
-| ``'fieldname'`` | ditto |
-+-------------------------+-------------------------------------------------------------------------------------+
-| ``*`` | any field |
-+-------------------------+-------------------------------------------------------------------------------------+
-| *field* ``,`` *field* | either of the named fields (you can always build equivalent jsonpath using ``|``) |
-+-------------------------+-------------------------------------------------------------------------------------+
-
-Array specifiers ( *idx* ):
-
-+-----------------------------------------+---------------------------------------------------------------------------------------+
-| Syntax | Meaning |
-+=========================================+=======================================================================================+
-| ``[``\ *n*\ ``]`` | array index (may be comma-separated list) |
-+-----------------------------------------+---------------------------------------------------------------------------------------+
-| ``[``\ *start*\ ``?:``\ *end*\ ``?]`` | array slicing (note that *step* is unimplemented only due to lack of need thus far) |
-+-----------------------------------------+---------------------------------------------------------------------------------------+
-| ``[*]`` | any array index |
-+-----------------------------------------+---------------------------------------------------------------------------------------+
-
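
To make the operator tables above concrete, here is a small example combining `where` and `|` (the field and value names are illustrative only):

    from jsonpath_rw import parse

    store = {'a': {'price': 1}, 'b': {'cost': 2}}
    # `where`: nodes matching the left side that have a child matching the right
    print([str(m.full_path) for m in parse('* where price').find(store)])  # ['a']
    # `|`: the union of both paths' matches
    print([m.value for m in parse('a.price | b.cost').find(store)])        # [1, 2]
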
-Programmatic JSONPath
----------------------
-
-If you are programming in Python and would like a more robust way to
-create JSONPath expressions that does not depend on a parser, it is very
-easy to do so directly, and here are some examples:
-
-- ``Root()``
-- ``Slice(start=0, end=None, step=None)``
-- ``Fields('foo', 'bar')``
-- ``Index(42)``
-- ``Child(Fields('foo'), Index(42))``
-- ``Where(Slice(), Fields('subfield'))``
-- ``Descendants(jsonpath, jsonpath)``
-
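
These constructors compose and execute just like parsed expressions. A brief sketch (the data is illustrative):

    from jsonpath_rw.jsonpath import Child, Fields, Index

    expr = Child(Fields('foo'), Index(0))  # same AST as parse('foo[0]')
    print([m.value for m in expr.find({'foo': ['first', 'second']})])  # ['first']
    print(str(expr))  # foo.[0]
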
-Extensions
-----------
-
-- *Path data*: The result of ``JsonPath.find`` provides detailed context
- and path data so it is easy to traverse to parent objects, print full
- paths to pieces of data, and generate automatic ids.
-- *Automatic Ids*: If you set ``jsonpath_rw.auto_id_field`` to a value
- other than None, then for any piece of data missing that field, it
- will be replaced by the JSONPath to it, giving automatic unique ids
- to any piece of data. These ids will take into account any ids
- already present as well.
-- *Named operators*: Instead of using ``@`` to reference the current
- object, this library uses ```this```. In general, any string
- contained in backquotes can be made to be a new operator, currently
- by extending the library.
-
-More to explore
----------------
-
-There are way too many jsonpath implementations out there to discuss.
-Some are robust, some are toy projects that still work fine, some are
-exercises. There will undoubtedly be many more. This one is made for use
-in released, maintained code, and in particular for programmatic access
-to the abstract syntax and extension. But JSONPath at its simplest just
-isn't that complicated, so you can probably use any of them
-successfully. Why not this one?
-
-The original proposal, as far as I know:
-
-- `JSONPath - XPath for
- JSON `__ by Stefan Goessner.
-
-Special note about PLY and docstrings
--------------------------------------
-
-The main parsing toolkit underlying this library,
-`PLY `__, does not work with docstrings
-removed. For example, ``PYTHONOPTIMIZE=2`` and ``python -OO`` will both
-cause a failure.
-
-Contributors
-------------
-
-This package is authored and maintained by:
-
-- `Kenn Knowles `__
- (`@kennknowles `__)
-
-with the help of patches submitted by `these contributors `__.
-
-Copyright and License
----------------------
-
-Copyright 2013- Kenneth Knowles
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may
-not use this file except in compliance with the License. You may obtain
-a copy of the License at
-
-::
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-.. |Build Status| image:: https://travis-ci.org/kennknowles/python-jsonpath-rw.png?branch=master
- :target: https://travis-ci.org/kennknowles/python-jsonpath-rw
-.. |Test coverage| image:: https://coveralls.io/repos/kennknowles/python-jsonpath-rw/badge.png?branch=master
- :target: https://coveralls.io/r/kennknowles/python-jsonpath-rw
-.. |PyPi version| image:: https://pypip.in/v/jsonpath-rw/badge.png
- :target: https://pypi.python.org/pypi/jsonpath-rw
-.. |PyPi downloads| image:: https://pypip.in/d/jsonpath-rw/badge.png
- :target: https://pypi.python.org/pypi/jsonpath-rw
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/RECORD
deleted file mode 100644
index 033b725d..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/RECORD
+++ /dev/null
@@ -1,14 +0,0 @@
-../../bin/jsonpath.py,sha256=ATsDNnA0stK9yFqmR87jytRPFf0tHZFriffpqAjbmuE,237
-jsonpath_rw-1.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-jsonpath_rw-1.4.0.dist-info/METADATA,sha256=YpILgtomSGnBnMkWAnoiCjCltr_Xg-NRUASouddqa5I,13258
-jsonpath_rw-1.4.0.dist-info/RECORD,,
-jsonpath_rw-1.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jsonpath_rw-1.4.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-jsonpath_rw-1.4.0.dist-info/entry_points.txt,sha256=rhhrY2M1qcBqXZdcsE7b3WzZYR9K7jqG8JZ_BQBj3OA,70
-jsonpath_rw-1.4.0.dist-info/top_level.txt,sha256=ZkYenrz7C0bQMlN3Nn0VYMWQc2Vm5zfcGQTOJowecEA,12
-jsonpath_rw/__init__.py,sha256=ptcHrCa_lKTgP_OLUyEeiFSfolhQG5wsxzAckU7OZSY,73
-jsonpath_rw/bin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jsonpath_rw/bin/jsonpath.py,sha256=BeSDueY6cK9CYwoAoBDU66eUwk7_y2iqWvQhcHqcgsE,2151
-jsonpath_rw/jsonpath.py,sha256=D5HMd1JnMMKVVukSRBGgvFaRoJiYPbgrKZEzBA7Elc0,17243
-jsonpath_rw/lexer.py,sha256=G24SM_V2WrHCAEPVxcbbJX5saFW-pTAKJ0nTXFCsy5A,5319
-jsonpath_rw/parser.py,sha256=SBSswvg6DdTyg-h2uBTk18UAI-2CAAnwcfMPryu8oPY,5628
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/REQUESTED b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/REQUESTED
deleted file mode 100644
index e69de29b..00000000
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/WHEEL
deleted file mode 100644
index becc9a66..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.37.1)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/entry_points.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/entry_points.txt
deleted file mode 100644
index acf2f782..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/entry_points.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-[console_scripts]
-jsonpath.py = jsonpath_rw.bin.jsonpath:entry_point
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/top_level.txt
deleted file mode 100644
index 7675e363..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw-1.4.0.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-jsonpath_rw
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/bin/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/bin/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/bin/jsonpath.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/bin/jsonpath.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/jsonpath.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/jsonpath.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/lexer.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/lexer.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/parser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonpath_rw/parser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/COPYING b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/COPYING
deleted file mode 100644
index af9cfbdb..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/COPYING
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2013 Julian Berman
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/METADATA
deleted file mode 100644
index aef9b18d..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/METADATA
+++ /dev/null
@@ -1,224 +0,0 @@
-Metadata-Version: 2.1
-Name: jsonschema
-Version: 3.2.0
-Summary: An implementation of JSON Schema validation for Python
-Home-page: https://github.com/Julian/jsonschema
-Author: Julian Berman
-Author-email: Julian@GrayVines.com
-License: UNKNOWN
-Project-URL: Docs, https://python-jsonschema.readthedocs.io/en/latest/
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Requires-Dist: attrs (>=17.4.0)
-Requires-Dist: pyrsistent (>=0.14.0)
-Requires-Dist: setuptools
-Requires-Dist: six (>=1.11.0)
-Requires-Dist: functools32 ; python_version < "3"
-Requires-Dist: importlib-metadata ; python_version < "3.8"
-Provides-Extra: format
-Requires-Dist: idna ; extra == 'format'
-Requires-Dist: jsonpointer (>1.13) ; extra == 'format'
-Requires-Dist: rfc3987 ; extra == 'format'
-Requires-Dist: strict-rfc3339 ; extra == 'format'
-Requires-Dist: webcolors ; extra == 'format'
-Provides-Extra: format_nongpl
-Requires-Dist: idna ; extra == 'format_nongpl'
-Requires-Dist: jsonpointer (>1.13) ; extra == 'format_nongpl'
-Requires-Dist: webcolors ; extra == 'format_nongpl'
-Requires-Dist: rfc3986-validator (>0.1.0) ; extra == 'format_nongpl'
-Requires-Dist: rfc3339-validator ; extra == 'format_nongpl'
-
-==========
-jsonschema
-==========
-
-|PyPI| |Pythons| |Travis| |AppVeyor| |Codecov| |ReadTheDocs|
-
-.. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema.svg
- :alt: PyPI version
- :target: https://pypi.org/project/jsonschema/
-
-.. |Pythons| image:: https://img.shields.io/pypi/pyversions/jsonschema.svg
- :alt: Supported Python versions
- :target: https://pypi.org/project/jsonschema/
-
-.. |Travis| image:: https://travis-ci.com/Julian/jsonschema.svg?branch=master
- :alt: Travis build status
- :target: https://travis-ci.com/Julian/jsonschema
-
-.. |AppVeyor| image:: https://ci.appveyor.com/api/projects/status/adtt0aiaihy6muyn/branch/master?svg=true
- :alt: AppVeyor build status
- :target: https://ci.appveyor.com/project/Julian/jsonschema
-
-.. |Codecov| image:: https://codecov.io/gh/Julian/jsonschema/branch/master/graph/badge.svg
- :alt: Codecov Code coverage
- :target: https://codecov.io/gh/Julian/jsonschema
-
-.. |ReadTheDocs| image:: https://readthedocs.org/projects/python-jsonschema/badge/?version=stable&style=flat
- :alt: ReadTheDocs status
- :target: https://python-jsonschema.readthedocs.io/en/stable/
-
-
-``jsonschema`` is an implementation of `JSON Schema `_
-for Python (supporting 2.7+ including Python 3).
-
-.. code-block:: python
-
- >>> from jsonschema import validate
-
- >>> # A sample schema, like what we'd get from json.load()
- >>> schema = {
- ... "type" : "object",
- ... "properties" : {
- ... "price" : {"type" : "number"},
- ... "name" : {"type" : "string"},
- ... },
- ... }
-
- >>> # If no exception is raised by validate(), the instance is valid.
- >>> validate(instance={"name" : "Eggs", "price" : 34.99}, schema=schema)
-
- >>> validate(
- ... instance={"name" : "Eggs", "price" : "Invalid"}, schema=schema,
- ... ) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ValidationError: 'Invalid' is not of type 'number'
-
-It can also be used from the console:
-
-.. code-block:: bash
-
- $ jsonschema -i sample.json sample.schema
-
-Features
---------
-
-* Full support for
- `Draft 7 `_,
- `Draft 6 `_,
- `Draft 4 `_
- and
- `Draft 3 `_
-
-* `Lazy validation `_
- that can iteratively report *all* validation errors (see the sketch after this list).
-
-* `Programmatic querying `_
- of which properties or items failed validation.
-
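
A sketch of the lazy-validation and programmatic-querying features just listed, using the Draft 4 validator (which both this vendored copy and upstream expose):

    from jsonschema import Draft4Validator

    schema = {"type": "object", "properties": {"price": {"type": "number"}}}
    validator = Draft4Validator(schema)
    for error in sorted(validator.iter_errors({"price": "oops"}), key=str):
        print(error.message)     # 'oops' is not of type 'number'
        print(list(error.path))  # ['price'] -- which property failed
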
-
-Installation
-------------
-
-``jsonschema`` is available on `PyPI `_. You can install using `pip `_:
-
-.. code-block:: bash
-
- $ pip install jsonschema
-
-
-Demo
-----
-
-Try ``jsonschema`` interactively in this online demo:
-
-.. image:: https://user-images.githubusercontent.com/1155573/56745335-8b158a00-6750-11e9-8776-83fa675939c4.png
- :target: https://notebooks.ai/demo/gh/Julian/jsonschema
- :alt: Open Live Demo
-
-
-Online demo Notebook will look similar to this:
-
-
-.. image:: https://user-images.githubusercontent.com/1155573/56820861-5c1c1880-6823-11e9-802a-ce01c5ec574f.gif
- :alt: Open Live Demo
- :width: 480 px
-
-
-Release Notes
--------------
-
-v3.1 brings support for ECMA 262 dialect regular expressions
-throughout schemas, as recommended by the specification. Big
-thanks to @Zac-HD for authoring support in a new `js-regex
- `_ library.
-
-
-Running the Test Suite
-----------------------
-
-If you have ``tox`` installed (perhaps via ``pip install tox`` or your
-package manager), running ``tox`` in the directory of your source
-checkout will run ``jsonschema``'s test suite on all of the versions
-of Python ``jsonschema`` supports. If you don't have all of the
-versions that ``jsonschema`` is tested under, you'll likely want to run
-using ``tox``'s ``--skip-missing-interpreters`` option.
-
-Of course you're also free to just run the tests on a single version with your
-favorite test runner. The tests live in the ``jsonschema.tests`` package.
-
-
-Benchmarks
-----------
-
-``jsonschema``'s benchmarks make use of `pyperf
-`_.
-
-Running them can be done via ``tox -e perf``, or by invoking the ``pyperf``
-commands externally (after ensuring that both it and ``jsonschema`` itself are
-installed)::
-
- $ python -m pyperf jsonschema/benchmarks/test_suite.py --hist --output results.json
-
-To compare to a previous run, use::
-
- $ python -m pyperf compare_to --table reference.json results.json
-
-See the ``pyperf`` documentation for more details.
-
-
-Community
----------
-
-There's a `mailing list `_
-for this implementation on Google Groups.
-
-Please join, and feel free to send questions there.
-
-
-Contributing
-------------
-
-I'm Julian Berman.
-
-``jsonschema`` is on `GitHub `_.
-
-Get in touch, via GitHub or otherwise, if you've got something to contribute,
-it'd be most welcome!
-
-You can also generally find me on Freenode (nick: ``tos9``) in various
-channels, including ``#python``.
-
-If you feel overwhelmingly grateful, you can also woo me with beer money
-via Google Pay with the email in my GitHub profile.
-
-And for companies who appreciate ``jsonschema`` and its continued support
-and growth, ``jsonschema`` is also now supportable via `TideLift
-`_.
-
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/RECORD
deleted file mode 100644
index 7cd95550..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/RECORD
+++ /dev/null
@@ -1,37 +0,0 @@
-../../bin/jsonschema,sha256=lxBKiFj4zfSEeher0xvOrFwGZp6m2vSW1P8sy-V0baA,213
-jsonschema-3.2.0.dist-info/COPYING,sha256=T5KgFaE8TRoEC-8BiqE0MLTxvHO0Gxa7hGw0Z2bedDk,1057
-jsonschema-3.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-jsonschema-3.2.0.dist-info/METADATA,sha256=os_TL7tiSfPYDMKYoAqoNsw_yMkDJmCL2bqhp-csNR0,7760
-jsonschema-3.2.0.dist-info/RECORD,,
-jsonschema-3.2.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jsonschema-3.2.0.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
-jsonschema-3.2.0.dist-info/entry_points.txt,sha256=KaVUBBSLyzi5naUkVg-r3q6T_igdLgaHY6Mm3oLX73s,52
-jsonschema-3.2.0.dist-info/top_level.txt,sha256=jGoNS61vDONU8U7p0Taf-y_8JVG1Z2CJ5Eif6zMN_cw,11
-jsonschema/__init__.py,sha256=dHAr_pQLbbDFoRnbVMrQVztVUvnBFgFlm7bU82pMvOk,934
-jsonschema/__main__.py,sha256=in4bbzfixCAyGe3RhBwhQVZnGkruszNedcbmwxGyJgc,39
-jsonschema/_format.py,sha256=vwD1v7S8BmJvSF5y0o6dbPgjAyzt07PZpyO3pvNVVgQ,11691
-jsonschema/_legacy_validators.py,sha256=kYcYiHfRV-aQtIQv2qe_71L3QFs3LiJ3v69ifteAN4E,4584
-jsonschema/_reflect.py,sha256=gggQrcrf5FRoyhgdE6ggJ4n2FQHEzWS4CS-cm9bYcqI,5023
-jsonschema/_types.py,sha256=t2naRRhuTKIUIB0GMR9kOp2la2aVqeT2tFlVAobndmg,4490
-jsonschema/_utils.py,sha256=ezZJMQ0eU4oPvkTmZi6g5vsCtIFRhb8vN4Y9S4uQwW8,5168
-jsonschema/_validators.py,sha256=UDYawpxK8f_rIeEBXZtwr0tlxi3OH1Zt2ca0zAxjNdk,11703
-jsonschema/benchmarks/__init__.py,sha256=A0sQrxDBVHSyQ-8ru3L11hMXf3q9gVuB9x_YgHb4R9M,70
-jsonschema/benchmarks/issue232.py,sha256=-azAUmrP75f0uj0x2zEdBc3-DhQw3XX9UQVDCyhBKRk,541
-jsonschema/benchmarks/json_schema_test_suite.py,sha256=okRE6ACue2C0Hd1dMhnpZ0bc3AoZdDd8cw2lwTnbzwU,343
-jsonschema/cli.py,sha256=3Vc8ptc2GD7zDxK2F-kamqmrE9f35a2KVDGR1p1acUU,2310
-jsonschema/compat.py,sha256=37gSA8MmAR65zlqzsSEB-0ObZk_I2TF7z1kp9zmkskg,1353
-jsonschema/exceptions.py,sha256=ukWIE7aEES8Kh0UaUP9turpUkV2ZzXEN8CwfRObzlMA,10450
-jsonschema/schemas/draft3.json,sha256=PdtCu2s06Va3hV9cX5A5-rvye50SVF__NrvxG0vuzz0,4564
-jsonschema/schemas/draft4.json,sha256=ODL-0W3kv7MmqL3tm3GJguuVxN1QNO1GtBcpWE3ok98,5399
-jsonschema/schemas/draft6.json,sha256=wp386fVINcOgbAOzxdXsDtp3cGVo-cTffPvHVmpRAG0,4437
-jsonschema/schemas/draft7.json,sha256=PVOSCIJhYGxVm2A_OFMpyfGrRbXWZ-uZBodFOwVdQF4,4819
-jsonschema/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-jsonschema/tests/_helpers.py,sha256=3c-b9CK0cdGfhtuUhzM1AjtqPtR2VFvfcKC6G2g0a-0,157
-jsonschema/tests/_suite.py,sha256=6lxDHOyjJfCjdn9vfOLcUpXtNl0vLIljrinSFi1tRhc,6728
-jsonschema/tests/test_cli.py,sha256=djw7ZD6zm5_8FgsAr9XyYk4zErIEoPRs8SzBe5nYcWY,4727
-jsonschema/tests/test_exceptions.py,sha256=zw9bd_al5zOzAm8nJ0IqeymiweH6i8k1AN3CB7t618A,15348
-jsonschema/tests/test_format.py,sha256=ob0QDop_nwRwiLs1P6sGsf6ZITik00CWhe1pL8JRiA0,2982
-jsonschema/tests/test_jsonschema_test_suite.py,sha256=8uiplgvQq5yFvtvWxbyqyr7HMYRCx6jNE3OiU-u8AEk,8464
-jsonschema/tests/test_types.py,sha256=lntWPZ86fwo_aNKbfCueX5R2xdwrYYN7Zo5C0-ppk-0,5902
-jsonschema/tests/test_validators.py,sha256=R_zhsDKG5r66LE1OVlzdcPyKRWKgc07e6NVWxQkrRiQ,60394
-jsonschema/validators.py,sha256=RIZTQyZxhWwsyIIRFQGEjLzq38LlyzzzdYUl9jxzV0M,29400
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/REQUESTED b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/REQUESTED
deleted file mode 100644
index e69de29b..00000000
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/WHEEL
deleted file mode 100644
index 8b701e93..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.33.6)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/entry_points.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/entry_points.txt
deleted file mode 100644
index c627b310..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/entry_points.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-[console_scripts]
-jsonschema = jsonschema.cli:main
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/top_level.txt
deleted file mode 100644
index d89304b1..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema-3.2.0.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-jsonschema
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/__init__.py
old mode 100644
new mode 100755
index 6b630cdf..baf1d89b
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/__init__.py
@@ -4,31 +4,21 @@
The main functionality is provided by the validator classes for each of the
supported JSON Schema versions.
-Most commonly, `validate` is the quickest way to simply validate a given
+Most commonly, :func:`validate` is the quickest way to simply validate a given
instance under a schema, and will create a validator for you.
+
"""
from jsonschema.exceptions import (
ErrorTree, FormatError, RefResolutionError, SchemaError, ValidationError
)
from jsonschema._format import (
- FormatChecker,
- draft3_format_checker,
- draft4_format_checker,
- draft6_format_checker,
- draft7_format_checker,
+ FormatChecker, draft3_format_checker, draft4_format_checker,
)
-from jsonschema._types import TypeChecker
from jsonschema.validators import (
- Draft3Validator,
- Draft4Validator,
- Draft6Validator,
- Draft7Validator,
- RefResolver,
- validate,
+ Draft3Validator, Draft4Validator, RefResolver, validate
)
-try:
- from importlib import metadata
-except ImportError: # for Python<3.8
- import importlib_metadata as metadata
-__version__ = metadata.version("jsonschema")
+
+from jsonschema._version import __version__
+
+# flake8: noqa
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/__main__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/__main__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_format.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_format.py
old mode 100644
new mode 100755
index 281a7cfc..caae127f
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_format.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_format.py
@@ -1,7 +1,6 @@
import datetime
import re
import socket
-import struct
from jsonschema.compat import str_types
from jsonschema.exceptions import FormatError
@@ -15,19 +14,20 @@ class FormatChecker(object):
validation. If validation is desired however, instances of this class can
be hooked into validators to enable format validation.
- `FormatChecker` objects always return ``True`` when asked about
+ :class:`FormatChecker` objects always return ``True`` when asked about
formats that they do not know how to validate.
To check a custom format using a function that takes an instance and
- returns a ``bool``, use the `FormatChecker.checks` or
- `FormatChecker.cls_checks` decorators.
+ returns a ``bool``, use the :meth:`FormatChecker.checks` or
+ :meth:`FormatChecker.cls_checks` decorators.
Arguments:
- formats (~collections.Iterable):
+ formats (iterable):
The known formats to validate. This argument can be used to
limit which formats will be used during validation.
+
"""
checkers = {}
@@ -38,9 +38,6 @@ def __init__(self, formats=None):
else:
self.checkers = dict((k, self.checkers[k]) for k in formats)
- def __repr__(self):
- return "".format(sorted(self.checkers))
-
def checks(self, format, raises=()):
"""
Register a decorated function as validating a new format.
@@ -53,12 +50,13 @@ def checks(self, format, raises=()):
raises (Exception):
- The exception(s) raised by the decorated function when an
- invalid instance is found.
+ The exception(s) raised by the decorated function when
+ an invalid instance is found.
The exception object will be accessible as the
- `jsonschema.exceptions.ValidationError.cause` attribute of the
- resulting validation error.
+ :attr:`ValidationError.cause` attribute of the resulting
+ validation error.
+
"""
def _checks(func):
@@ -74,7 +72,7 @@ def check(self, instance, format):
Arguments:
- instance (*any primitive type*, i.e. str, number, bool):
+ instance (any primitive type, i.e. str, number, bool):
The instance to check
@@ -85,7 +83,8 @@ def check(self, instance, format):
Raises:
- FormatError: if the instance does not conform to ``format``
+ :exc:`FormatError` if instance does not conform to ``format``
+
"""
if format not in self.checkers:
@@ -108,7 +107,7 @@ def conforms(self, instance, format):
Arguments:
- instance (*any primitive type*, i.e. str, number, bool):
+ instance (any primitive type, i.e. str, number, bool):
The instance to check
@@ -118,7 +117,8 @@ def conforms(self, instance, format):
Returns:
- bool: whether it conformed
+ bool: Whether it conformed
+
"""
try:
@@ -129,55 +129,25 @@ def conforms(self, instance, format):
return True
-draft3_format_checker = FormatChecker()
-draft4_format_checker = FormatChecker()
-draft6_format_checker = FormatChecker()
-draft7_format_checker = FormatChecker()
-
-
-_draft_checkers = dict(
- draft3=draft3_format_checker,
- draft4=draft4_format_checker,
- draft6=draft6_format_checker,
- draft7=draft7_format_checker,
-)
+_draft_checkers = {"draft3": [], "draft4": []}
-def _checks_drafts(
- name=None,
- draft3=None,
- draft4=None,
- draft6=None,
- draft7=None,
- raises=(),
-):
- draft3 = draft3 or name
- draft4 = draft4 or name
- draft6 = draft6 or name
- draft7 = draft7 or name
+def _checks_drafts(both=None, draft3=None, draft4=None, raises=()):
+ draft3 = draft3 or both
+ draft4 = draft4 or both
def wrap(func):
if draft3:
- func = _draft_checkers["draft3"].checks(draft3, raises)(func)
+ _draft_checkers["draft3"].append(draft3)
+ func = FormatChecker.cls_checks(draft3, raises)(func)
if draft4:
- func = _draft_checkers["draft4"].checks(draft4, raises)(func)
- if draft6:
- func = _draft_checkers["draft6"].checks(draft6, raises)(func)
- if draft7:
- func = _draft_checkers["draft7"].checks(draft7, raises)(func)
-
- # Oy. This is bad global state, but relied upon for now, until
- # deprecation. See https://github.com/Julian/jsonschema/issues/519
- # and test_format_checkers_come_with_defaults
- FormatChecker.cls_checks(draft7 or draft6 or draft4 or draft3, raises)(
- func,
- )
+ _draft_checkers["draft4"].append(draft4)
+ func = FormatChecker.cls_checks(draft4, raises)(func)
return func
return wrap
-@_checks_drafts(name="idn-email")
-@_checks_drafts(name="email")
+@_checks_drafts("email")
def is_email(instance):
if not isinstance(instance, str_types):
return True
@@ -187,9 +157,7 @@ def is_email(instance):
_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
-@_checks_drafts(
- draft3="ip-address", draft4="ipv4", draft6="ipv4", draft7="ipv4",
-)
+@_checks_drafts(draft3="ip-address", draft4="ipv4")
def is_ipv4(instance):
if not isinstance(instance, str_types):
return True
@@ -199,11 +167,7 @@ def is_ipv4(instance):
if hasattr(socket, "inet_pton"):
- # FIXME: Really this only should raise struct.error, but see the sadness
- # that is https://twistedmatrix.com/trac/ticket/9409
- @_checks_drafts(
- name="ipv6", raises=(socket.error, struct.error, ValueError),
- )
+ @_checks_drafts("ipv6", raises=socket.error)
def is_ipv6(instance):
if not isinstance(instance, str_types):
return True
@@ -213,12 +177,7 @@ def is_ipv6(instance):
_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$")
-@_checks_drafts(
- draft3="host-name",
- draft4="hostname",
- draft6="hostname",
- draft7="hostname",
-)
+@_checks_drafts(draft3="host-name", draft4="hostname")
def is_host_name(instance):
if not isinstance(instance, str_types):
return True
@@ -232,103 +191,46 @@ def is_host_name(instance):
try:
- # The built-in `idna` codec only implements RFC 3890, so we go elsewhere.
- import idna
+ import rfc3987
except ImportError:
pass
else:
- @_checks_drafts(draft7="idn-hostname", raises=idna.IDNAError)
- def is_idn_host_name(instance):
+ @_checks_drafts("uri", raises=ValueError)
+ def is_uri(instance):
if not isinstance(instance, str_types):
return True
- idna.encode(instance)
- return True
+ return rfc3987.parse(instance, rule="URI")
try:
- import rfc3987
+ import strict_rfc3339
except ImportError:
try:
- from rfc3986_validator import validate_rfc3986
+ import isodate
except ImportError:
pass
else:
- @_checks_drafts(name="uri")
- def is_uri(instance):
- if not isinstance(instance, str_types):
- return True
- return validate_rfc3986(instance, rule="URI")
-
- @_checks_drafts(
- draft6="uri-reference",
- draft7="uri-reference",
- raises=ValueError,
- )
- def is_uri_reference(instance):
+ @_checks_drafts("date-time", raises=(ValueError, isodate.ISO8601Error))
+ def is_datetime(instance):
if not isinstance(instance, str_types):
return True
- return validate_rfc3986(instance, rule="URI_reference")
-
+ return isodate.parse_datetime(instance)
else:
- @_checks_drafts(draft7="iri", raises=ValueError)
- def is_iri(instance):
- if not isinstance(instance, str_types):
- return True
- return rfc3987.parse(instance, rule="IRI")
-
- @_checks_drafts(draft7="iri-reference", raises=ValueError)
- def is_iri_reference(instance):
- if not isinstance(instance, str_types):
- return True
- return rfc3987.parse(instance, rule="IRI_reference")
-
- @_checks_drafts(name="uri", raises=ValueError)
- def is_uri(instance):
- if not isinstance(instance, str_types):
- return True
- return rfc3987.parse(instance, rule="URI")
-
- @_checks_drafts(
- draft6="uri-reference",
- draft7="uri-reference",
- raises=ValueError,
- )
- def is_uri_reference(instance):
- if not isinstance(instance, str_types):
- return True
- return rfc3987.parse(instance, rule="URI_reference")
-
-
-try:
- from strict_rfc3339 import validate_rfc3339
-except ImportError:
- try:
- from rfc3339_validator import validate_rfc3339
- except ImportError:
- validate_rfc3339 = None
-
-if validate_rfc3339:
- @_checks_drafts(name="date-time")
+ @_checks_drafts("date-time")
def is_datetime(instance):
if not isinstance(instance, str_types):
return True
- return validate_rfc3339(instance)
-
- @_checks_drafts(draft7="time")
- def is_time(instance):
- if not isinstance(instance, str_types):
- return True
- return is_datetime("1970-01-01T" + instance)
+ return strict_rfc3339.validate_rfc3339(instance)
-@_checks_drafts(name="regex", raises=re.error)
+@_checks_drafts("regex", raises=re.error)
def is_regex(instance):
if not isinstance(instance, str_types):
return True
return re.compile(instance)
-@_checks_drafts(draft3="date", draft7="date", raises=ValueError)
+@_checks_drafts(draft3="date", raises=ValueError)
def is_date(instance):
if not isinstance(instance, str_types):
return True
@@ -336,7 +238,7 @@ def is_date(instance):
@_checks_drafts(draft3="time", raises=ValueError)
-def is_draft3_time(instance):
+def is_time(instance):
if not isinstance(instance, str_types):
return True
return datetime.datetime.strptime(instance, "%H:%M:%S")
@@ -365,61 +267,5 @@ def is_css3_color(instance):
return is_css_color_code(instance)
-try:
- import jsonpointer
-except ImportError:
- pass
-else:
- @_checks_drafts(
- draft6="json-pointer",
- draft7="json-pointer",
- raises=jsonpointer.JsonPointerException,
- )
- def is_json_pointer(instance):
- if not isinstance(instance, str_types):
- return True
- return jsonpointer.JsonPointer(instance)
-
- # TODO: I don't want to maintain this, so it
- # needs to go either into jsonpointer (pending
- # https://github.com/stefankoegl/python-json-pointer/issues/34) or
- # into a new external library.
- @_checks_drafts(
- draft7="relative-json-pointer",
- raises=jsonpointer.JsonPointerException,
- )
- def is_relative_json_pointer(instance):
- # Definition taken from:
- # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
- if not isinstance(instance, str_types):
- return True
- non_negative_integer, rest = [], ""
- for i, character in enumerate(instance):
- if character.isdigit():
- non_negative_integer.append(character)
- continue
-
- if not non_negative_integer:
- return False
-
- rest = instance[i:]
- break
- return (rest == "#") or jsonpointer.JsonPointer(rest)
-
-
-try:
- import uritemplate.exceptions
-except ImportError:
- pass
-else:
- @_checks_drafts(
- draft6="uri-template",
- draft7="uri-template",
- raises=uritemplate.exceptions.InvalidTemplate,
- )
- def is_uri_template(
- instance,
- template_validator=uritemplate.Validator().force_balanced_braces(),
- ):
- template = uritemplate.URITemplate(instance)
- return template_validator.validate(template)
+draft3_format_checker = FormatChecker(_draft_checkers["draft3"])
+draft4_format_checker = FormatChecker(_draft_checkers["draft4"])
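
The `_checks_drafts` helper above is internal plumbing for registering one function against several drafts; the public equivalent is `FormatChecker.checks`. A minimal sketch with a made-up "even" format (the format name is illustrative):

    from jsonschema import Draft4Validator, FormatChecker

    checker = FormatChecker()

    @checker.checks("even", raises=ValueError)
    def is_even(value):
        # Return True/False; raising ValueError also counts as non-conforming.
        return int(value) % 2 == 0

    validator = Draft4Validator({"format": "even"}, format_checker=checker)
    print(validator.is_valid(4))  # True
    print(validator.is_valid(5))  # False
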
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_legacy_validators.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_legacy_validators.py
deleted file mode 100644
index 264ff7d7..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_legacy_validators.py
+++ /dev/null
@@ -1,141 +0,0 @@
-from jsonschema import _utils
-from jsonschema.compat import iteritems
-from jsonschema.exceptions import ValidationError
-
-
-def dependencies_draft3(validator, dependencies, instance, schema):
- if not validator.is_type(instance, "object"):
- return
-
- for property, dependency in iteritems(dependencies):
- if property not in instance:
- continue
-
- if validator.is_type(dependency, "object"):
- for error in validator.descend(
- instance, dependency, schema_path=property,
- ):
- yield error
- elif validator.is_type(dependency, "string"):
- if dependency not in instance:
- yield ValidationError(
- "%r is a dependency of %r" % (dependency, property)
- )
- else:
- for each in dependency:
- if each not in instance:
- message = "%r is a dependency of %r"
- yield ValidationError(message % (each, property))
-
-
-def disallow_draft3(validator, disallow, instance, schema):
- for disallowed in _utils.ensure_list(disallow):
- if validator.is_valid(instance, {"type": [disallowed]}):
- yield ValidationError(
- "%r is disallowed for %r" % (disallowed, instance)
- )
-
-
-def extends_draft3(validator, extends, instance, schema):
- if validator.is_type(extends, "object"):
- for error in validator.descend(instance, extends):
- yield error
- return
- for index, subschema in enumerate(extends):
- for error in validator.descend(instance, subschema, schema_path=index):
- yield error
-
-
-def items_draft3_draft4(validator, items, instance, schema):
- if not validator.is_type(instance, "array"):
- return
-
- if validator.is_type(items, "object"):
- for index, item in enumerate(instance):
- for error in validator.descend(item, items, path=index):
- yield error
- else:
- for (index, item), subschema in zip(enumerate(instance), items):
- for error in validator.descend(
- item, subschema, path=index, schema_path=index,
- ):
- yield error
-
-
-def minimum_draft3_draft4(validator, minimum, instance, schema):
- if not validator.is_type(instance, "number"):
- return
-
- if schema.get("exclusiveMinimum", False):
- failed = instance <= minimum
- cmp = "less than or equal to"
- else:
- failed = instance < minimum
- cmp = "less than"
-
- if failed:
- yield ValidationError(
- "%r is %s the minimum of %r" % (instance, cmp, minimum)
- )
-
-
-def maximum_draft3_draft4(validator, maximum, instance, schema):
- if not validator.is_type(instance, "number"):
- return
-
- if schema.get("exclusiveMaximum", False):
- failed = instance >= maximum
- cmp = "greater than or equal to"
- else:
- failed = instance > maximum
- cmp = "greater than"
-
- if failed:
- yield ValidationError(
- "%r is %s the maximum of %r" % (instance, cmp, maximum)
- )
-
-
-def properties_draft3(validator, properties, instance, schema):
- if not validator.is_type(instance, "object"):
- return
-
- for property, subschema in iteritems(properties):
- if property in instance:
- for error in validator.descend(
- instance[property],
- subschema,
- path=property,
- schema_path=property,
- ):
- yield error
- elif subschema.get("required", False):
- error = ValidationError("%r is a required property" % property)
- error._set(
- validator="required",
- validator_value=subschema["required"],
- instance=instance,
- schema=schema,
- )
- error.path.appendleft(property)
- error.schema_path.extend([property, "required"])
- yield error
-
-
-def type_draft3(validator, types, instance, schema):
- types = _utils.ensure_list(types)
-
- all_errors = []
- for index, type in enumerate(types):
- if validator.is_type(type, "object"):
- errors = list(validator.descend(instance, type, schema_path=index))
- if not errors:
- return
- all_errors.extend(errors)
- else:
- if validator.is_type(instance, type):
- return
- else:
- yield ValidationError(
- _utils.types_msg(instance, types), context=all_errors,
- )
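
The module deleted above holds the draft-3 variants that the _validators.py hunk further down re-inlines; the most visible draft-3 vs. draft-4 difference is where `required` lives. A small contrast sketch (schemas illustrative, assuming the vendored 2.6 entry points):

    from jsonschema import Draft3Validator, Draft4Validator

    # Draft 3: "required" is a boolean inside each property's subschema.
    draft3_schema = {"properties": {"name": {"type": "string", "required": True}}}

    # Draft 4: "required" is a top-level list of property names.
    draft4_schema = {
        "properties": {"name": {"type": "string"}},
        "required": ["name"],
    }

    assert not Draft3Validator(draft3_schema).is_valid({})
    assert not Draft4Validator(draft4_schema).is_valid({})
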
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_reflect.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_reflect.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_types.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_types.py
deleted file mode 100644
index a71a4e34..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_types.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import numbers
-
-from pyrsistent import pmap
-import attr
-
-from jsonschema.compat import int_types, str_types
-from jsonschema.exceptions import UndefinedTypeCheck
-
-
-def is_array(checker, instance):
- return isinstance(instance, list)
-
-
-def is_bool(checker, instance):
- return isinstance(instance, bool)
-
-
-def is_integer(checker, instance):
- # bool inherits from int, so ensure bools aren't reported as ints
- if isinstance(instance, bool):
- return False
- return isinstance(instance, int_types)
-
-
-def is_null(checker, instance):
- return instance is None
-
-
-def is_number(checker, instance):
- # bool inherits from int, so ensure bools aren't reported as ints
- if isinstance(instance, bool):
- return False
- return isinstance(instance, numbers.Number)
-
-
-def is_object(checker, instance):
- return isinstance(instance, dict)
-
-
-def is_string(checker, instance):
- return isinstance(instance, str_types)
-
-
-def is_any(checker, instance):
- return True
-
-
-@attr.s(frozen=True)
-class TypeChecker(object):
- """
- A ``type`` property checker.
-
- A `TypeChecker` performs type checking for an `IValidator`. Type
- checks to perform are updated using `TypeChecker.redefine` or
- `TypeChecker.redefine_many` and removed via `TypeChecker.remove`.
- Each of these return a new `TypeChecker` object.
-
- Arguments:
-
- type_checkers (dict):
-
- The initial mapping of types to their checking functions.
- """
- _type_checkers = attr.ib(default=pmap(), converter=pmap)
-
- def is_type(self, instance, type):
- """
- Check if the instance is of the appropriate type.
-
- Arguments:
-
- instance (object):
-
- The instance to check
-
- type (str):
-
- The name of the type that is expected.
-
- Returns:
-
- bool: Whether it conformed.
-
-
- Raises:
-
- `jsonschema.exceptions.UndefinedTypeCheck`:
- if type is unknown to this object.
- """
- try:
- fn = self._type_checkers[type]
- except KeyError:
- raise UndefinedTypeCheck(type)
-
- return fn(self, instance)
-
- def redefine(self, type, fn):
- """
- Produce a new checker with the given type redefined.
-
- Arguments:
-
- type (str):
-
- The name of the type to check.
-
- fn (collections.Callable):
-
- A function taking exactly two parameters - the type
- checker calling the function and the instance to check.
- The function should return true if instance is of this
- type and false otherwise.
-
- Returns:
-
- A new `TypeChecker` instance.
- """
- return self.redefine_many({type: fn})
-
- def redefine_many(self, definitions=()):
- """
- Produce a new checker with the given types redefined.
-
- Arguments:
-
- definitions (dict):
-
- A dictionary mapping types to their checking functions.
-
- Returns:
-
- A new `TypeChecker` instance.
- """
- return attr.evolve(
- self, type_checkers=self._type_checkers.update(definitions),
- )
-
- def remove(self, *types):
- """
- Produce a new checker with the given types forgotten.
-
- Arguments:
-
- types (~collections.Iterable):
-
- the names of the types to remove.
-
- Returns:
-
- A new `TypeChecker` instance
-
- Raises:
-
- `jsonschema.exceptions.UndefinedTypeCheck`:
-
- if any given type is unknown to this object
- """
-
- checkers = self._type_checkers
- for each in types:
- try:
- checkers = checkers.remove(each)
- except KeyError:
- raise UndefinedTypeCheck(each)
- return attr.evolve(self, type_checkers=checkers)
-
-
-draft3_type_checker = TypeChecker(
- {
- u"any": is_any,
- u"array": is_array,
- u"boolean": is_bool,
- u"integer": is_integer,
- u"object": is_object,
- u"null": is_null,
- u"number": is_number,
- u"string": is_string,
- },
-)
-draft4_type_checker = draft3_type_checker.remove(u"any")
-draft6_type_checker = draft4_type_checker.redefine(
- u"integer",
- lambda checker, instance: (
- is_integer(checker, instance) or
- isinstance(instance, float) and instance.is_integer()
- ),
-)
-draft7_type_checker = draft6_type_checker
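
The deleted TypeChecker module also carried the draft-6 rule that a float with no fractional part counts as an integer. For reference, that rule in isolation (plain Python, no jsonschema API assumed):

    # Draft 4: 1.0 is a "number" but not an "integer".
    # Draft 6 (removed above): 1.0 counts as an "integer" too.
    def is_draft6_integer(instance):
        if isinstance(instance, bool):      # bools are never integers here
            return False
        return isinstance(instance, int) or (
            isinstance(instance, float) and instance.is_integer()
        )

    assert is_draft6_integer(1.0)       # draft 6 accepts this
    assert not is_draft6_integer(1.5)
    assert not is_draft6_integer(True)
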
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_utils.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_utils.py
old mode 100644
new mode 100755
index ceb88019..7b1c33a0
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_utils.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_utils.py
@@ -3,12 +3,13 @@
import pkgutil
import re
-from jsonschema.compat import MutableMapping, str_types, urlsplit
+from jsonschema.compat import str_types, MutableMapping, urlsplit
class URIDict(MutableMapping):
"""
Dictionary which uses normalized URIs as keys.
+
"""
def normalize(self, uri):
@@ -40,6 +41,7 @@ def __repr__(self):
class Unset(object):
"""
An as-of-yet unset attribute or unprovided default parameter.
+
"""
def __repr__(self):
@@ -49,15 +51,17 @@ def __repr__(self):
def load_schema(name):
"""
Load a schema from ./schemas/``name``.json and return it.
+
"""
- data = pkgutil.get_data("jsonschema", "schemas/{0}.json".format(name))
+ data = pkgutil.get_data('jsonschema', "schemas/{0}.json".format(name))
return json.loads(data.decode("utf-8"))
def indent(string, times=1):
"""
- A dumb version of `textwrap.indent` from Python 3.3.
+ A dumb version of :func:`textwrap.indent` from Python 3.3.
+
"""
return "\n".join(" " * (4 * times) + line for line in string.splitlines())
@@ -74,6 +78,7 @@ def format_as_index(indices):
indices (sequence):
The indices to format.
+
"""
if not indices:
@@ -89,6 +94,7 @@ def find_additional_properties(instance, schema):
/ or ``patternProperties``.
Assumes ``instance`` is dict-like already.
+
"""
properties = schema.get("properties", {})
@@ -103,6 +109,7 @@ def find_additional_properties(instance, schema):
def extras_msg(extras):
"""
Create an error message for extra items or properties.
+
"""
if len(extras) == 1:
@@ -120,6 +127,7 @@ def types_msg(instance, types):
be considered to be a description of that object and used as its type.
Otherwise the message is simply the reprs of the given ``types``.
+
"""
reprs = []
@@ -139,6 +147,7 @@ def flatten(suitable_for_isinstance):
* an arbitrary nested tree of tuples
Return a flattened tuple of the given argument.
+
"""
types = set()
@@ -158,6 +167,7 @@ def ensure_list(thing):
Wrap ``thing`` in a list if it's a single str.
Otherwise, return it unchanged.
+
"""
if isinstance(thing, str_types):
@@ -165,16 +175,10 @@ def ensure_list(thing):
return thing
-def equal(one, two):
- """
- Check if two things are equal, but evade booleans and ints being equal.
- """
- return unbool(one) == unbool(two)
-
-
def unbool(element, true=object(), false=object()):
"""
A hack to make True and 1 and False and 0 unique for ``uniq``.
+
"""
if element is True:
@@ -191,6 +195,7 @@ def uniq(container):
Successively tries first to rely that the elements are hashable, then
falls back on them being sortable, and finally falls back on brute
force.
+
"""
try:
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_validators.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_validators.py
old mode 100644
new mode 100755
index 179fec09..d0baec81
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_validators.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_validators.py
@@ -1,14 +1,6 @@
import re
-from jsonschema._utils import (
- ensure_list,
- equal,
- extras_msg,
- find_additional_properties,
- types_msg,
- unbool,
- uniq,
-)
+from jsonschema import _utils
from jsonschema.exceptions import FormatError, ValidationError
from jsonschema.compat import iteritems
@@ -26,23 +18,11 @@ def patternProperties(validator, patternProperties, instance, schema):
yield error
-def propertyNames(validator, propertyNames, instance, schema):
- if not validator.is_type(instance, "object"):
- return
-
- for property in instance:
- for error in validator.descend(
- instance=property,
- schema=propertyNames,
- ):
- yield error
-
-
def additionalProperties(validator, aP, instance, schema):
if not validator.is_type(instance, "object"):
return
- extras = set(find_additional_properties(instance, schema))
+ extras = set(_utils.find_additional_properties(instance, schema))
if validator.is_type(aP, "object"):
for extra in extras:
@@ -63,23 +43,23 @@ def additionalProperties(validator, aP, instance, schema):
yield ValidationError(error)
else:
error = "Additional properties are not allowed (%s %s unexpected)"
- yield ValidationError(error % extras_msg(extras))
+ yield ValidationError(error % _utils.extras_msg(extras))
def items(validator, items, instance, schema):
if not validator.is_type(instance, "array"):
return
- if validator.is_type(items, "array"):
+ if validator.is_type(items, "object"):
+ for index, item in enumerate(instance):
+ for error in validator.descend(item, items, path=index):
+ yield error
+ else:
for (index, item), subschema in zip(enumerate(instance), items):
for error in validator.descend(
item, subschema, path=index, schema_path=index,
):
yield error
- else:
- for index, item in enumerate(instance):
- for error in validator.descend(item, items, path=index):
- yield error
def additionalItems(validator, aI, instance, schema):
@@ -98,46 +78,7 @@ def additionalItems(validator, aI, instance, schema):
error = "Additional items are not allowed (%s %s unexpected)"
yield ValidationError(
error %
- extras_msg(instance[len(schema.get("items", [])):])
- )
-
-
-def const(validator, const, instance, schema):
- if not equal(instance, const):
- yield ValidationError("%r was expected" % (const,))
-
-
-def contains(validator, contains, instance, schema):
- if not validator.is_type(instance, "array"):
- return
-
- if not any(validator.is_valid(element, contains) for element in instance):
- yield ValidationError(
- "None of %r are valid under the given schema" % (instance,)
- )
-
-
-def exclusiveMinimum(validator, minimum, instance, schema):
- if not validator.is_type(instance, "number"):
- return
-
- if instance <= minimum:
- yield ValidationError(
- "%r is less than or equal to the minimum of %r" % (
- instance, minimum,
- ),
- )
-
-
-def exclusiveMaximum(validator, maximum, instance, schema):
- if not validator.is_type(instance, "number"):
- return
-
- if instance >= maximum:
- yield ValidationError(
- "%r is greater than or equal to the maximum of %r" % (
- instance, maximum,
- ),
+ _utils.extras_msg(instance[len(schema.get("items", [])):])
)
@@ -145,9 +86,16 @@ def minimum(validator, minimum, instance, schema):
if not validator.is_type(instance, "number"):
return
- if instance < minimum:
+ if schema.get("exclusiveMinimum", False):
+ failed = instance <= minimum
+ cmp = "less than or equal to"
+ else:
+ failed = instance < minimum
+ cmp = "less than"
+
+ if failed:
yield ValidationError(
- "%r is less than the minimum of %r" % (instance, minimum)
+ "%r is %s the minimum of %r" % (instance, cmp, minimum)
)
@@ -155,9 +103,16 @@ def maximum(validator, maximum, instance, schema):
if not validator.is_type(instance, "number"):
return
- if instance > maximum:
+ if schema.get("exclusiveMaximum", False):
+ failed = instance >= maximum
+ cmp = "greater than or equal to"
+ else:
+ failed = instance > maximum
+ cmp = "greater than"
+
+ if failed:
yield ValidationError(
- "%r is greater than the maximum of %r" % (instance, maximum)
+ "%r is %s the maximum of %r" % (instance, cmp, maximum)
)
@@ -189,7 +144,7 @@ def uniqueItems(validator, uI, instance, schema):
if (
uI and
validator.is_type(instance, "array") and
- not uniq(instance)
+ not _utils.uniq(instance)
):
yield ValidationError("%r has non-unique elements" % (instance,))
@@ -228,24 +183,22 @@ def dependencies(validator, dependencies, instance, schema):
if property not in instance:
continue
- if validator.is_type(dependency, "array"):
- for each in dependency:
- if each not in instance:
- message = "%r is a dependency of %r"
- yield ValidationError(message % (each, property))
- else:
+ if validator.is_type(dependency, "object"):
for error in validator.descend(
instance, dependency, schema_path=property,
):
yield error
+ else:
+ dependencies = _utils.ensure_list(dependency)
+ for dependency in dependencies:
+ if dependency not in instance:
+ yield ValidationError(
+ "%r is a dependency of %r" % (dependency, property)
+ )
def enum(validator, enums, instance, schema):
- if instance == 0 or instance == 1:
- unbooled = unbool(instance)
- if all(unbooled != unbool(each) for each in enums):
- yield ValidationError("%r is not one of %r" % (instance, enums))
- elif instance not in enums:
+ if instance not in enums:
yield ValidationError("%r is not one of %r" % (instance, enums))
@@ -266,14 +219,79 @@ def ref(validator, ref, instance, schema):
validator.resolver.pop_scope()
-def type(validator, types, instance, schema):
- types = ensure_list(types)
+def type_draft3(validator, types, instance, schema):
+ types = _utils.ensure_list(types)
+
+ all_errors = []
+ for index, type in enumerate(types):
+ if type == "any":
+ return
+ if validator.is_type(type, "object"):
+ errors = list(validator.descend(instance, type, schema_path=index))
+ if not errors:
+ return
+ all_errors.extend(errors)
+ else:
+ if validator.is_type(instance, type):
+ return
+ else:
+ yield ValidationError(
+ _utils.types_msg(instance, types), context=all_errors,
+ )
+
+
+def properties_draft3(validator, properties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, subschema in iteritems(properties):
+ if property in instance:
+ for error in validator.descend(
+ instance[property],
+ subschema,
+ path=property,
+ schema_path=property,
+ ):
+ yield error
+ elif subschema.get("required", False):
+ error = ValidationError("%r is a required property" % property)
+ error._set(
+ validator="required",
+ validator_value=subschema["required"],
+ instance=instance,
+ schema=schema,
+ )
+ error.path.appendleft(property)
+ error.schema_path.extend([property, "required"])
+ yield error
+
+
+def disallow_draft3(validator, disallow, instance, schema):
+ for disallowed in _utils.ensure_list(disallow):
+ if validator.is_valid(instance, {"type": [disallowed]}):
+ yield ValidationError(
+ "%r is disallowed for %r" % (disallowed, instance)
+ )
+
+
+def extends_draft3(validator, extends, instance, schema):
+ if validator.is_type(extends, "object"):
+ for error in validator.descend(instance, extends):
+ yield error
+ return
+ for index, subschema in enumerate(extends):
+ for error in validator.descend(instance, subschema, schema_path=index):
+ yield error
+
+
+def type_draft4(validator, types, instance, schema):
+ types = _utils.ensure_list(types)
if not any(validator.is_type(instance, type) for type in types):
- yield ValidationError(types_msg(instance, types))
+ yield ValidationError(_utils.types_msg(instance, types))
-def properties(validator, properties, instance, schema):
+def properties_draft4(validator, properties, instance, schema):
if not validator.is_type(instance, "object"):
return
@@ -288,7 +306,7 @@ def properties(validator, properties, instance, schema):
yield error
-def required(validator, required, instance, schema):
+def required_draft4(validator, required, instance, schema):
if not validator.is_type(instance, "object"):
return
for property in required:
@@ -296,31 +314,33 @@ def required(validator, required, instance, schema):
yield ValidationError("%r is a required property" % property)
-def minProperties(validator, mP, instance, schema):
+def minProperties_draft4(validator, mP, instance, schema):
if validator.is_type(instance, "object") and len(instance) < mP:
yield ValidationError(
"%r does not have enough properties" % (instance,)
)
-def maxProperties(validator, mP, instance, schema):
+def maxProperties_draft4(validator, mP, instance, schema):
if not validator.is_type(instance, "object"):
return
if validator.is_type(instance, "object") and len(instance) > mP:
yield ValidationError("%r has too many properties" % (instance,))
-def allOf(validator, allOf, instance, schema):
+def allOf_draft4(validator, allOf, instance, schema):
for index, subschema in enumerate(allOf):
for error in validator.descend(instance, subschema, schema_path=index):
yield error
-def anyOf(validator, anyOf, instance, schema):
+def oneOf_draft4(validator, oneOf, instance, schema):
+ subschemas = enumerate(oneOf)
all_errors = []
- for index, subschema in enumerate(anyOf):
+ for index, subschema in subschemas:
errs = list(validator.descend(instance, subschema, schema_path=index))
if not errs:
+ first_valid = subschema
break
all_errors.extend(errs)
else:
@@ -329,14 +349,20 @@ def anyOf(validator, anyOf, instance, schema):
context=all_errors,
)
+ more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
+ if more_valid:
+ more_valid.append(first_valid)
+ reprs = ", ".join(repr(schema) for schema in more_valid)
+ yield ValidationError(
+ "%r is valid under each of %s" % (instance, reprs)
+ )
-def oneOf(validator, oneOf, instance, schema):
- subschemas = enumerate(oneOf)
+
+def anyOf_draft4(validator, anyOf, instance, schema):
all_errors = []
- for index, subschema in subschemas:
+ for index, subschema in enumerate(anyOf):
errs = list(validator.descend(instance, subschema, schema_path=index))
if not errs:
- first_valid = subschema
break
all_errors.extend(errs)
else:
@@ -345,29 +371,9 @@ def oneOf(validator, oneOf, instance, schema):
context=all_errors,
)
- more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
- if more_valid:
- more_valid.append(first_valid)
- reprs = ", ".join(repr(schema) for schema in more_valid)
- yield ValidationError(
- "%r is valid under each of %s" % (instance, reprs)
- )
-
-def not_(validator, not_schema, instance, schema):
+def not_draft4(validator, not_schema, instance, schema):
if validator.is_valid(instance, not_schema):
yield ValidationError(
"%r is not allowed for %r" % (not_schema, instance)
)
-
-
-def if_(validator, if_schema, instance, schema):
- if validator.is_valid(instance, if_schema):
- if u"then" in schema:
- then = schema[u"then"]
- for error in validator.descend(instance, then, schema_path="then"):
- yield error
- elif u"else" in schema:
- else_ = schema[u"else"]
- for error in validator.descend(instance, else_, schema_path="else"):
- yield error
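
The minimum/maximum hunks above restore the draft-4 reading of exclusiveMinimum/exclusiveMaximum as boolean modifiers, rather than the draft-6 standalone numeric keywords. A quick sketch of the difference (assuming the vendored Draft4Validator):

    from jsonschema import Draft4Validator

    # Draft 4 (restored above): exclusiveMinimum is a flag on "minimum".
    v = Draft4Validator({"minimum": 5, "exclusiveMinimum": True})
    assert v.is_valid(6)
    assert not v.is_valid(5)    # 5 <= 5 fails once the bound is exclusive

    # Draft 6+ (removed above) spells the same constraint {"exclusiveMinimum": 5};
    # the downgraded validator no longer understands that standalone form.
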
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_version.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_version.py
new file mode 100755
index 00000000..f69ec9e0
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/_version.py
@@ -0,0 +1,5 @@
+
+# This file is automatically generated by setup.py.
+__version__ = '2.6.0'
+__sha__ = 'gd16713a'
+__revision__ = 'gd16713a'
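
The generated _version.py above is what pins the vendored copy's reported version; a one-line sanity check (assuming the add-on's bundled copy shadows any system-wide jsonschema on sys.path):

    import jsonschema

    assert jsonschema.__version__ == "2.6.0"
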
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/benchmarks/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/benchmarks/__init__.py
deleted file mode 100644
index e3dcc689..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/benchmarks/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""
-Benchmarks for validation.
-
-This package is *not* public API.
-"""
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/benchmarks/issue232.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/benchmarks/issue232.py
deleted file mode 100644
index 65e3aedf..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/benchmarks/issue232.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-A performance benchmark using the example from issue #232.
-
-See https://github.com/Julian/jsonschema/pull/232.
-"""
-from twisted.python.filepath import FilePath
-from pyperf import Runner
-from pyrsistent import m
-
-from jsonschema.tests._suite import Version
-import jsonschema
-
-
-issue232 = Version(
- path=FilePath(__file__).sibling("issue232"),
- remotes=m(),
- name="issue232",
-)
-
-
-if __name__ == "__main__":
- issue232.benchmark(
- runner=Runner(),
- Validator=jsonschema.Draft4Validator,
- )
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/benchmarks/json_schema_test_suite.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/benchmarks/json_schema_test_suite.py
deleted file mode 100644
index 5add5051..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/benchmarks/json_schema_test_suite.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-"""
-A performance benchmark using the official test suite.
-
-This benchmarks jsonschema using every valid example in the
-JSON-Schema-Test-Suite. It will take some time to complete.
-"""
-from pyperf import Runner
-
-from jsonschema.tests._suite import Suite
-
-
-if __name__ == "__main__":
- Suite().benchmark(runner=Runner())
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/cli.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/cli.py
old mode 100644
new mode 100755
index ab3335b2..fb1b0f50
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/cli.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/cli.py
@@ -1,12 +1,8 @@
-"""
-The ``jsonschema`` command line.
-"""
from __future__ import absolute_import
import argparse
import json
import sys
-from jsonschema import __version__
from jsonschema._reflect import namedAny
from jsonschema.validators import validator_for
@@ -31,7 +27,7 @@ def _json_file(path):
dest="instances",
type=_json_file,
help=(
- "a path to a JSON instance (i.e. filename.json) "
+ "a path to a JSON instance (i.e. filename.json)"
"to validate (may be specified multiple times)"
),
)
@@ -53,14 +49,9 @@ def _json_file(path):
"of the class."
),
)
-parser.add_argument(
- "--version",
- action="version",
- version=__version__,
-)
parser.add_argument(
"schema",
- help="the JSON Schema to validate with (i.e. schema.json)",
+ help="the JSON Schema to validate with (i.e. filename.schema)",
type=_json_file,
)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/compat.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/compat.py
old mode 100644
new mode 100755
index 47e09804..ff91fe62
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/compat.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/compat.py
@@ -1,40 +1,33 @@
-"""
-Python 2/3 compatibility helpers.
-
-Note: This module is *not* public API.
-"""
-import contextlib
import operator
import sys
try:
- from collections.abc import MutableMapping, Sequence # noqa
-except ImportError:
from collections import MutableMapping, Sequence # noqa
+except ImportError:
+ from collections.abc import MutableMapping, Sequence # noqa
PY3 = sys.version_info[0] >= 3
if PY3:
zip = zip
from functools import lru_cache
- from io import StringIO as NativeIO
+ from io import StringIO
from urllib.parse import (
- unquote, urljoin, urlunsplit, SplitResult, urlsplit
+ unquote, urljoin, urlunsplit, SplitResult, urlsplit as _urlsplit
)
- from urllib.request import pathname2url, urlopen
+ from urllib.request import urlopen
str_types = str,
int_types = int,
iteritems = operator.methodcaller("items")
else:
from itertools import izip as zip # noqa
- from io import BytesIO as NativeIO
- from urlparse import urljoin, urlunsplit, SplitResult, urlsplit
- from urllib import pathname2url, unquote # noqa
- import urllib2 # noqa
- def urlopen(*args, **kwargs):
- return contextlib.closing(urllib2.urlopen(*args, **kwargs))
-
+ from StringIO import StringIO
+ from urlparse import (
+ urljoin, urlunsplit, SplitResult, urlsplit as _urlsplit # noqa
+ )
+ from urllib import unquote # noqa
+ from urllib2 import urlopen # noqa
str_types = basestring
int_types = int, long
iteritems = operator.methodcaller("iteritems")
@@ -42,13 +35,21 @@ def urlopen(*args, **kwargs):
from functools32 import lru_cache
+# On python < 3.3 fragments are not handled properly with unknown schemes
+def urlsplit(url):
+ scheme, netloc, path, query, fragment = _urlsplit(url)
+ if "#" in path:
+ path, fragment = path.split("#", 1)
+ return SplitResult(scheme, netloc, path, query, fragment)
+
+
def urldefrag(url):
if "#" in url:
s, n, p, q, frag = urlsplit(url)
- defrag = urlunsplit((s, n, p, q, ""))
+ defrag = urlunsplit((s, n, p, q, ''))
else:
defrag = url
- frag = ""
+ frag = ''
return defrag, frag
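
The urlsplit shim added above works around older interpreters whose stdlib urlsplit only strips fragments for schemes it knows, leaving a "#" inside the path for unknown schemes. A runnable restatement of the shim (URL purely illustrative):

    from urllib.parse import SplitResult, urlsplit as _urlsplit

    def urlsplit(url):
        # Same shim as above: re-split any fragment the stdlib left in the path.
        scheme, netloc, path, query, fragment = _urlsplit(url)
        if "#" in path:
            path, fragment = path.split("#", 1)
        return SplitResult(scheme, netloc, path, query, fragment)

    # Modern interpreters already split the fragment, so this is a no-op there;
    # on the old ones it targets, "...thing#frag" becomes ("...thing", "frag").
    print(urlsplit("tag:example.com,2019:thing#frag").fragment)   # frag
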
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/exceptions.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/exceptions.py
old mode 100644
new mode 100755
index 691dcffe..da3e8527
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/exceptions.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/exceptions.py
@@ -1,13 +1,8 @@
-"""
-Validation errors, and some surrounding helpers.
-"""
from collections import defaultdict, deque
import itertools
import pprint
import textwrap
-import attr
-
from jsonschema import _utils
from jsonschema.compat import PY3, iteritems
@@ -72,18 +67,16 @@ def __unicode__(self):
pinstance = pprint.pformat(self.instance, width=72)
return self.message + textwrap.dedent("""
- Failed validating %r in %s%s:
+ Failed validating %r in schema%s:
%s
- On %s%s:
+ On instance%s:
%s
""".rstrip()
) % (
self.validator,
- self._word_for_schema_in_error_message,
_utils.format_as_index(list(self.relative_schema_path)[:-1]),
_utils.indent(pschema),
- self._word_for_instance_in_error_message,
_utils.format_as_index(self.relative_path),
_utils.indent(pinstance),
)
@@ -132,58 +125,18 @@ def _contents(self):
class ValidationError(_Error):
- """
- An instance was invalid under a provided schema.
- """
-
- _word_for_schema_in_error_message = "schema"
- _word_for_instance_in_error_message = "instance"
+ pass
class SchemaError(_Error):
- """
- A schema was invalid under its corresponding metaschema.
- """
-
- _word_for_schema_in_error_message = "metaschema"
- _word_for_instance_in_error_message = "schema"
+ pass
-@attr.s(hash=True)
class RefResolutionError(Exception):
- """
- A ref could not be resolved.
- """
-
- _cause = attr.ib()
-
- def __str__(self):
- return str(self._cause)
-
-
-class UndefinedTypeCheck(Exception):
- """
- A type checker was asked to check a type it did not have registered.
- """
-
- def __init__(self, type):
- self.type = type
-
- def __unicode__(self):
- return "Type %r is unknown to this type checker" % self.type
-
- if PY3:
- __str__ = __unicode__
- else:
- def __str__(self):
- return unicode(self).encode("utf-8")
+ pass
class UnknownType(Exception):
- """
- A validator was asked to validate an instance against an unknown type.
- """
-
def __init__(self, type, instance, schema):
self.type = type
self.instance = instance
@@ -209,10 +162,6 @@ def __str__(self):
class FormatError(Exception):
- """
- Validating a format failed.
- """
-
def __init__(self, message, cause=None):
super(FormatError, self).__init__(message, cause)
self.message = message
@@ -231,6 +180,7 @@ def __str__(self):
class ErrorTree(object):
"""
ErrorTrees make it easier to check which validations failed.
+
"""
_instance = _unset
@@ -250,6 +200,7 @@ def __init__(self, errors=()):
def __contains__(self, index):
"""
Check whether ``instance[index]`` has any errors.
+
"""
return index in self._contents
@@ -261,7 +212,8 @@ def __getitem__(self, index):
If the index is not in the instance that this tree corresponds to and
is not known by this tree, whatever error would be raised by
``instance.__getitem__`` will be propagated (usually this is some
- subclass of `exceptions.LookupError`.
+ subclass of :class:`LookupError`.
+
"""
if self._instance is not _unset and index not in self:
@@ -269,22 +221,22 @@ def __getitem__(self, index):
return self._contents[index]
def __setitem__(self, index, value):
- """
- Add an error to the tree at the given ``index``.
- """
self._contents[index] = value
def __iter__(self):
"""
Iterate (non-recursively) over the indices in the instance with errors.
+
"""
return iter(self._contents)
def __len__(self):
"""
- Return the `total_errors`.
+ Same as :attr:`total_errors`.
+
"""
+
return self.total_errors
def __repr__(self):
@@ -294,6 +246,7 @@ def __repr__(self):
def total_errors(self):
"""
The total number of errors in the entire tree, including children.
+
"""
child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
@@ -301,21 +254,6 @@ def total_errors(self):
def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
- """
- Create a key function that can be used to sort errors by relevance.
-
- Arguments:
- weak (set):
- a collection of validator names to consider to be "weak".
- If there are two errors at the same level of the instance
- and one is in the set of weak validator names, the other
- error will take priority. By default, :validator:`anyOf` and
- :validator:`oneOf` are considered weak validators and will
- be superseded by other same-level validation errors.
-
- strong (set):
- a collection of validator names to consider to be "strong"
- """
def relevance(error):
validator = error.validator
return -len(error.path), validator not in weak, validator in strong
@@ -326,43 +264,6 @@ def relevance(error):
def best_match(errors, key=relevance):
- """
- Try to find an error that appears to be the best match among given errors.
-
- In general, errors that are higher up in the instance (i.e. for which
- `ValidationError.path` is shorter) are considered better matches,
- since they indicate "more" is wrong with the instance.
-
- If the resulting match is either :validator:`oneOf` or :validator:`anyOf`,
- the *opposite* assumption is made -- i.e. the deepest error is picked,
- since these validators only need to match once, and any other errors may
- not be relevant.
-
- Arguments:
- errors (collections.Iterable):
-
- the errors to select from. Do not provide a mixture of
- errors from different validation attempts (i.e. from
- different instances or schemas), since it won't produce
- sensical output.
-
- key (collections.Callable):
-
- the key to use when sorting errors. See `relevance` and
- transitively `by_relevance` for more details (the default is
- to sort with the defaults of that function). Changing the
- default is only useful if you want to change the function
- that rates errors but still want the error context descent
- done by this function.
-
- Returns:
- the best matching error, or ``None`` if the iterable was empty
-
- .. note::
-
- This function is a heuristic. Its return value may change for a given
- set of inputs from version to version if better heuristics are added.
- """
errors = iter(errors)
best = next(errors, None)
if best is None:
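
Even with its docstrings stripped in the hunk above, best_match keeps the same heuristic: shallow errors win, except under oneOf/anyOf where the deepest context error is chosen. A usage sketch (schema and instance illustrative):

    from jsonschema import Draft4Validator
    from jsonschema.exceptions import best_match

    validator = Draft4Validator({"properties": {"n": {"type": "integer"}}})
    error = best_match(validator.iter_errors({"n": "not a number"}))
    if error is not None:
        print(error.message)  # prints something like: 'not a number' is not of type 'integer'
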
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft3.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft3.json
index f8a09c56..5bcefe30 100644
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft3.json
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft3.json
@@ -80,7 +80,9 @@
"type": "number"
},
"enum": {
- "type": "array"
+ "minItems": 1,
+ "type": "array",
+ "uniqueItems": true
},
"exclusiveMaximum": {
"default": false,
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft4.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft4.json
index 9b666cff..bc7b2eea 100644
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft4.json
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft4.json
@@ -111,7 +111,9 @@
"type": "string"
},
"enum": {
- "type": "array"
+ "minItems": 1,
+ "type": "array",
+ "uniqueItems": true
},
"exclusiveMaximum": {
"default": false,
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft6.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft6.json
deleted file mode 100644
index a0d2bf78..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft6.json
+++ /dev/null
@@ -1,153 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-06/schema#",
- "$id": "http://json-schema.org/draft-06/schema#",
- "title": "Core schema meta-schema",
- "definitions": {
- "schemaArray": {
- "type": "array",
- "minItems": 1,
- "items": { "$ref": "#" }
- },
- "nonNegativeInteger": {
- "type": "integer",
- "minimum": 0
- },
- "nonNegativeIntegerDefault0": {
- "allOf": [
- { "$ref": "#/definitions/nonNegativeInteger" },
- { "default": 0 }
- ]
- },
- "simpleTypes": {
- "enum": [
- "array",
- "boolean",
- "integer",
- "null",
- "number",
- "object",
- "string"
- ]
- },
- "stringArray": {
- "type": "array",
- "items": { "type": "string" },
- "uniqueItems": true,
- "default": []
- }
- },
- "type": ["object", "boolean"],
- "properties": {
- "$id": {
- "type": "string",
- "format": "uri-reference"
- },
- "$schema": {
- "type": "string",
- "format": "uri"
- },
- "$ref": {
- "type": "string",
- "format": "uri-reference"
- },
- "title": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "default": {},
- "examples": {
- "type": "array",
- "items": {}
- },
- "multipleOf": {
- "type": "number",
- "exclusiveMinimum": 0
- },
- "maximum": {
- "type": "number"
- },
- "exclusiveMaximum": {
- "type": "number"
- },
- "minimum": {
- "type": "number"
- },
- "exclusiveMinimum": {
- "type": "number"
- },
- "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
- "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
- "pattern": {
- "type": "string",
- "format": "regex"
- },
- "additionalItems": { "$ref": "#" },
- "items": {
- "anyOf": [
- { "$ref": "#" },
- { "$ref": "#/definitions/schemaArray" }
- ],
- "default": {}
- },
- "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
- "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
- "uniqueItems": {
- "type": "boolean",
- "default": false
- },
- "contains": { "$ref": "#" },
- "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
- "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
- "required": { "$ref": "#/definitions/stringArray" },
- "additionalProperties": { "$ref": "#" },
- "definitions": {
- "type": "object",
- "additionalProperties": { "$ref": "#" },
- "default": {}
- },
- "properties": {
- "type": "object",
- "additionalProperties": { "$ref": "#" },
- "default": {}
- },
- "patternProperties": {
- "type": "object",
- "additionalProperties": { "$ref": "#" },
- "propertyNames": { "format": "regex" },
- "default": {}
- },
- "dependencies": {
- "type": "object",
- "additionalProperties": {
- "anyOf": [
- { "$ref": "#" },
- { "$ref": "#/definitions/stringArray" }
- ]
- }
- },
- "propertyNames": { "$ref": "#" },
- "const": {},
- "enum": {
- "type": "array"
- },
- "type": {
- "anyOf": [
- { "$ref": "#/definitions/simpleTypes" },
- {
- "type": "array",
- "items": { "$ref": "#/definitions/simpleTypes" },
- "minItems": 1,
- "uniqueItems": true
- }
- ]
- },
- "format": { "type": "string" },
- "allOf": { "$ref": "#/definitions/schemaArray" },
- "anyOf": { "$ref": "#/definitions/schemaArray" },
- "oneOf": { "$ref": "#/definitions/schemaArray" },
- "not": { "$ref": "#" }
- },
- "default": {}
-}
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft7.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft7.json
deleted file mode 100644
index 746cde96..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/schemas/draft7.json
+++ /dev/null
@@ -1,166 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "$id": "http://json-schema.org/draft-07/schema#",
- "title": "Core schema meta-schema",
- "definitions": {
- "schemaArray": {
- "type": "array",
- "minItems": 1,
- "items": { "$ref": "#" }
- },
- "nonNegativeInteger": {
- "type": "integer",
- "minimum": 0
- },
- "nonNegativeIntegerDefault0": {
- "allOf": [
- { "$ref": "#/definitions/nonNegativeInteger" },
- { "default": 0 }
- ]
- },
- "simpleTypes": {
- "enum": [
- "array",
- "boolean",
- "integer",
- "null",
- "number",
- "object",
- "string"
- ]
- },
- "stringArray": {
- "type": "array",
- "items": { "type": "string" },
- "uniqueItems": true,
- "default": []
- }
- },
- "type": ["object", "boolean"],
- "properties": {
- "$id": {
- "type": "string",
- "format": "uri-reference"
- },
- "$schema": {
- "type": "string",
- "format": "uri"
- },
- "$ref": {
- "type": "string",
- "format": "uri-reference"
- },
- "$comment": {
- "type": "string"
- },
- "title": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "default": true,
- "readOnly": {
- "type": "boolean",
- "default": false
- },
- "examples": {
- "type": "array",
- "items": true
- },
- "multipleOf": {
- "type": "number",
- "exclusiveMinimum": 0
- },
- "maximum": {
- "type": "number"
- },
- "exclusiveMaximum": {
- "type": "number"
- },
- "minimum": {
- "type": "number"
- },
- "exclusiveMinimum": {
- "type": "number"
- },
- "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
- "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
- "pattern": {
- "type": "string",
- "format": "regex"
- },
- "additionalItems": { "$ref": "#" },
- "items": {
- "anyOf": [
- { "$ref": "#" },
- { "$ref": "#/definitions/schemaArray" }
- ],
- "default": true
- },
- "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
- "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
- "uniqueItems": {
- "type": "boolean",
- "default": false
- },
- "contains": { "$ref": "#" },
- "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
- "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
- "required": { "$ref": "#/definitions/stringArray" },
- "additionalProperties": { "$ref": "#" },
- "definitions": {
- "type": "object",
- "additionalProperties": { "$ref": "#" },
- "default": {}
- },
- "properties": {
- "type": "object",
- "additionalProperties": { "$ref": "#" },
- "default": {}
- },
- "patternProperties": {
- "type": "object",
- "additionalProperties": { "$ref": "#" },
- "propertyNames": { "format": "regex" },
- "default": {}
- },
- "dependencies": {
- "type": "object",
- "additionalProperties": {
- "anyOf": [
- { "$ref": "#" },
- { "$ref": "#/definitions/stringArray" }
- ]
- }
- },
- "propertyNames": { "$ref": "#" },
- "const": true,
- "enum": {
- "type": "array",
- "items": true
- },
- "type": {
- "anyOf": [
- { "$ref": "#/definitions/simpleTypes" },
- {
- "type": "array",
- "items": { "$ref": "#/definitions/simpleTypes" },
- "minItems": 1,
- "uniqueItems": true
- }
- ]
- },
- "format": { "type": "string" },
- "contentMediaType": { "type": "string" },
- "contentEncoding": { "type": "string" },
- "if": {"$ref": "#"},
- "then": {"$ref": "#"},
- "else": {"$ref": "#"},
- "allOf": { "$ref": "#/definitions/schemaArray" },
- "anyOf": { "$ref": "#/definitions/schemaArray" },
- "oneOf": { "$ref": "#/definitions/schemaArray" },
- "not": { "$ref": "#" }
- },
- "default": true
-}
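
With the draft-6 and draft-7 metaschemas deleted, the vendored package is back to exposing only the older validators. A sanity check of what remains importable (assuming the 2.6 public API):

    import jsonschema

    assert hasattr(jsonschema, "Draft3Validator")
    assert hasattr(jsonschema, "Draft4Validator")
    assert not hasattr(jsonschema, "Draft7Validator")   # removed with the schema above
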
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/_helpers.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/_helpers.py
deleted file mode 100644
index 70f291fe..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/_helpers.py
+++ /dev/null
@@ -1,5 +0,0 @@
-def bug(issue=None):
- message = "A known bug."
- if issue is not None:
- message += " See issue #{issue}.".format(issue=issue)
- return message
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/_suite.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/_suite.py
deleted file mode 100644
index b68a7b66..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/_suite.py
+++ /dev/null
@@ -1,239 +0,0 @@
-"""
-Python representations of the JSON Schema Test Suite tests.
-"""
-
-from functools import partial
-import json
-import os
-import re
-import subprocess
-import sys
-import unittest
-
-from twisted.python.filepath import FilePath
-import attr
-
-from jsonschema.compat import PY3
-from jsonschema.validators import validators
-import jsonschema
-
-
-def _find_suite():
- root = os.environ.get("JSON_SCHEMA_TEST_SUITE")
- if root is not None:
- return FilePath(root)
-
- root = FilePath(jsonschema.__file__).parent().sibling("json")
- if not root.isdir(): # pragma: no cover
- raise ValueError(
- (
- "Can't find the JSON-Schema-Test-Suite directory. "
- "Set the 'JSON_SCHEMA_TEST_SUITE' environment "
- "variable or run the tests from alongside a checkout "
- "of the suite."
- ),
- )
- return root
-
-
-@attr.s(hash=True)
-class Suite(object):
-
- _root = attr.ib(default=attr.Factory(_find_suite))
-
- def _remotes(self):
- jsonschema_suite = self._root.descendant(["bin", "jsonschema_suite"])
- remotes = subprocess.check_output(
- [sys.executable, jsonschema_suite.path, "remotes"],
- )
- return {
- "http://localhost:1234/" + name: schema
- for name, schema in json.loads(remotes.decode("utf-8")).items()
- }
-
- def benchmark(self, runner): # pragma: no cover
- for name in validators:
- self.version(name=name).benchmark(runner=runner)
-
- def version(self, name):
- return Version(
- name=name,
- path=self._root.descendant(["tests", name]),
- remotes=self._remotes(),
- )
-
-
-@attr.s(hash=True)
-class Version(object):
-
- _path = attr.ib()
- _remotes = attr.ib()
-
- name = attr.ib()
-
- def benchmark(self, runner, **kwargs): # pragma: no cover
- for suite in self.tests():
- for test in suite:
- runner.bench_func(
- test.fully_qualified_name,
- partial(test.validate_ignoring_errors, **kwargs),
- )
-
- def tests(self):
- return (
- test
- for child in self._path.globChildren("*.json")
- for test in self._tests_in(
- subject=child.basename()[:-5],
- path=child,
- )
- )
-
- def format_tests(self):
- path = self._path.descendant(["optional", "format"])
- return (
- test
- for child in path.globChildren("*.json")
- for test in self._tests_in(
- subject=child.basename()[:-5],
- path=child,
- )
- )
-
- def tests_of(self, name):
- return self._tests_in(
- subject=name,
- path=self._path.child(name + ".json"),
- )
-
- def optional_tests_of(self, name):
- return self._tests_in(
- subject=name,
- path=self._path.descendant(["optional", name + ".json"]),
- )
-
- def to_unittest_testcase(self, *suites, **kwargs):
- name = kwargs.pop("name", "Test" + self.name.title())
- methods = {
- test.method_name: test.to_unittest_method(**kwargs)
- for suite in suites
- for tests in suite
- for test in tests
- }
- cls = type(name, (unittest.TestCase,), methods)
-
- try:
- cls.__module__ = _someone_save_us_the_module_of_the_caller()
- except Exception: # pragma: no cover
- # We're doing crazy things, so if they go wrong, like a function
- # behaving differently on some other interpreter, just make them
- # not happen.
- pass
-
- return cls
-
- def _tests_in(self, subject, path):
- for each in json.loads(path.getContent().decode("utf-8")):
- yield (
- _Test(
- version=self,
- subject=subject,
- case_description=each["description"],
- schema=each["schema"],
- remotes=self._remotes,
- **test
- ) for test in each["tests"]
- )
-
-
-@attr.s(hash=True, repr=False)
-class _Test(object):
-
- version = attr.ib()
-
- subject = attr.ib()
- case_description = attr.ib()
- description = attr.ib()
-
- data = attr.ib()
- schema = attr.ib(repr=False)
-
- valid = attr.ib()
-
- _remotes = attr.ib()
-
- def __repr__(self): # pragma: no cover
-        return "<Test {}>".format(self.fully_qualified_name)
-
- @property
- def fully_qualified_name(self): # pragma: no cover
- return " > ".join(
- [
- self.version.name,
- self.subject,
- self.case_description,
- self.description,
- ]
- )
-
- @property
- def method_name(self):
- delimiters = r"[\W\- ]+"
- name = "test_%s_%s_%s" % (
- re.sub(delimiters, "_", self.subject),
- re.sub(delimiters, "_", self.case_description),
- re.sub(delimiters, "_", self.description),
- )
-
- if not PY3: # pragma: no cover
- name = name.encode("utf-8")
- return name
-
- def to_unittest_method(self, skip=lambda test: None, **kwargs):
- if self.valid:
- def fn(this):
- self.validate(**kwargs)
- else:
- def fn(this):
- with this.assertRaises(jsonschema.ValidationError):
- self.validate(**kwargs)
-
- fn.__name__ = self.method_name
- reason = skip(self)
- return unittest.skipIf(reason is not None, reason)(fn)
-
- def validate(self, Validator, **kwargs):
- resolver = jsonschema.RefResolver.from_schema(
- schema=self.schema,
- store=self._remotes,
- id_of=Validator.ID_OF,
- )
- jsonschema.validate(
- instance=self.data,
- schema=self.schema,
- cls=Validator,
- resolver=resolver,
- **kwargs
- )
-
- def validate_ignoring_errors(self, Validator): # pragma: no cover
- try:
- self.validate(Validator=Validator)
- except jsonschema.ValidationError:
- pass
-
-
-def _someone_save_us_the_module_of_the_caller():
- """
- The FQON of the module 2nd stack frames up from here.
-
-    This is intended to allow us to dynamically return test case classes that
- are indistinguishable from being defined in the module that wants them.
-
- Otherwise, trial will mis-print the FQON, and copy pasting it won't re-run
- the class that really is running.
-
- Save us all, this is all so so so so so terrible.
- """
-
- return sys._getframe(2).f_globals["__name__"]
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/compat.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/compat.py
new file mode 100755
index 00000000..b37483f5
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/compat.py
@@ -0,0 +1,15 @@
+import sys
+
+
+if sys.version_info[:2] < (2, 7): # pragma: no cover
+ import unittest2 as unittest
+else:
+ import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+# flake8: noqa
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_cli.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_cli.py
old mode 100644
new mode 100755
index ed820ba3..09a2f6b0
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_cli.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_cli.py
@@ -1,11 +1,7 @@
-from unittest import TestCase
-import json
-import subprocess
-import sys
-
-from jsonschema import Draft4Validator, ValidationError, cli, __version__
-from jsonschema.compat import NativeIO
+from jsonschema import Draft4Validator, ValidationError, cli
+from jsonschema.compat import StringIO
from jsonschema.exceptions import SchemaError
+from jsonschema.tests.compat import mock, unittest
def fake_validator(*errors):
@@ -26,32 +22,28 @@ def check_schema(self, schema):
return FakeValidator
-class TestParser(TestCase):
-
+class TestParser(unittest.TestCase):
FakeValidator = fake_validator()
- instance_file = "foo.json"
- schema_file = "schema.json"
def setUp(self):
- cli.open = self.fake_open
- self.addCleanup(delattr, cli, "open")
-
- def fake_open(self, path):
- if path == self.instance_file:
- contents = ""
- elif path == self.schema_file:
- contents = {}
- else: # pragma: no cover
- self.fail("What is {!r}".format(path))
- return NativeIO(json.dumps(contents))
+ mock_open = mock.mock_open()
+ patch_open = mock.patch.object(cli, "open", mock_open, create=True)
+ patch_open.start()
+ self.addCleanup(patch_open.stop)
+
+ mock_json_load = mock.Mock()
+ mock_json_load.return_value = {}
+ patch_json_load = mock.patch("json.load")
+ patch_json_load.start()
+ self.addCleanup(patch_json_load.stop)
def test_find_validator_by_fully_qualified_object_name(self):
arguments = cli.parse_args(
[
"--validator",
"jsonschema.tests.test_cli.TestParser.FakeValidator",
- "--instance", self.instance_file,
- self.schema_file,
+ "--instance", "foo.json",
+ "schema.json",
]
)
self.assertIs(arguments["validator"], self.FakeValidator)
@@ -60,16 +52,16 @@ def test_find_validator_in_jsonschema(self):
arguments = cli.parse_args(
[
"--validator", "Draft4Validator",
- "--instance", self.instance_file,
- self.schema_file,
+ "--instance", "foo.json",
+ "schema.json",
]
)
self.assertIs(arguments["validator"], Draft4Validator)
-class TestCLI(TestCase):
+class TestCLI(unittest.TestCase):
def test_draft3_schema_draft4_validator(self):
- stdout, stderr = NativeIO(), NativeIO()
+ stdout, stderr = StringIO(), StringIO()
with self.assertRaises(SchemaError):
cli.run(
{
@@ -89,7 +81,7 @@ def test_draft3_schema_draft4_validator(self):
)
def test_successful_validation(self):
- stdout, stderr = NativeIO(), NativeIO()
+ stdout, stderr = StringIO(), StringIO()
exit_code = cli.run(
{
"validator": fake_validator(),
@@ -106,7 +98,7 @@ def test_successful_validation(self):
def test_unsuccessful_validation(self):
error = ValidationError("I am an error!", instance=1)
- stdout, stderr = NativeIO(), NativeIO()
+ stdout, stderr = StringIO(), StringIO()
exit_code = cli.run(
{
"validator": fake_validator([error]),
@@ -127,7 +119,7 @@ def test_unsuccessful_validation_multiple_instances(self):
ValidationError("8", instance=1),
]
second_errors = [ValidationError("7", instance=2)]
- stdout, stderr = NativeIO(), NativeIO()
+ stdout, stderr = StringIO(), StringIO()
exit_code = cli.run(
{
"validator": fake_validator(first_errors, second_errors),
@@ -141,11 +133,3 @@ def test_unsuccessful_validation_multiple_instances(self):
self.assertFalse(stdout.getvalue())
self.assertEqual(stderr.getvalue(), "1 - 9\t1 - 8\t2 - 7\t")
self.assertEqual(exit_code, 1)
-
- def test_version(self):
- version = subprocess.check_output(
- [sys.executable, "-m", "jsonschema", "--version"],
- stderr=subprocess.STDOUT,
- )
- version = version.decode("utf-8").strip()
- self.assertEqual(version, __version__)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_exceptions.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_exceptions.py
old mode 100644
new mode 100755
index eae00d76..1d7a6c6b
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_exceptions.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_exceptions.py
@@ -1,19 +1,18 @@
-from unittest import TestCase
import textwrap
from jsonschema import Draft4Validator, exceptions
from jsonschema.compat import PY3
+from jsonschema.tests.compat import mock, unittest
-class TestBestMatch(TestCase):
+class TestBestMatch(unittest.TestCase):
def best_match(self, errors):
errors = list(errors)
best = exceptions.best_match(errors)
reversed_best = exceptions.best_match(reversed(errors))
msg = "Didn't return a consistent best match!\nGot: {0}\n\nThen: {1}"
self.assertEqual(
- best._contents(), reversed_best._contents(),
- msg=msg.format(best, reversed_best),
+ best, reversed_best, msg=msg.format(best, reversed_best),
)
return best
@@ -35,6 +34,7 @@ def test_oneOf_and_anyOf_are_weak_matches(self):
"""
A property you *must* match is probably better than one you have to
match a part of.
+
"""
validator = Draft4Validator(
@@ -55,6 +55,7 @@ def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self):
I.e. since only one of the schemas must match, we look for the most
relevant one.
+
"""
validator = Draft4Validator(
@@ -80,6 +81,7 @@ def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self):
I.e. since only one of the schemas must match, we look for the most
relevant one.
+
"""
validator = Draft4Validator(
@@ -101,6 +103,7 @@ def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self):
"""
Now, if the error is allOf, we traverse but select the *most* relevant
error from the context, because all schemas here must match anyways.
+
"""
validator = Draft4Validator(
@@ -156,7 +159,7 @@ def test_no_errors(self):
self.assertIsNone(exceptions.best_match(validator.iter_errors({})))
-class TestByRelevance(TestCase):
+class TestByRelevance(unittest.TestCase):
def test_short_paths_are_better_matches(self):
shallow = exceptions.ValidationError("Oh no!", path=["baz"])
deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"])
@@ -208,13 +211,9 @@ def test_strong_validators_are_higher_priority(self):
self.assertIs(match, strong)
-class TestErrorTree(TestCase):
+class TestErrorTree(unittest.TestCase):
def test_it_knows_how_many_total_errors_it_contains(self):
- # FIXME: https://github.com/Julian/jsonschema/issues/442
- errors = [
- exceptions.ValidationError("Something", validator=i)
- for i in range(8)
- ]
+ errors = [mock.MagicMock() for _ in range(8)]
tree = exceptions.ErrorTree(errors)
self.assertEqual(tree.total_errors, 8)
@@ -250,7 +249,7 @@ def test_children_have_their_errors_dicts_built(self):
tree = exceptions.ErrorTree([e1, e2])
self.assertEqual(tree["bar"][0].errors, {"foo": e1, "quux": e2})
- def test_multiple_errors_with_instance(self):
+ def test_regression_multiple_errors_with_instance(self):
e1, e2 = (
exceptions.ValidationError(
"1",
@@ -263,6 +262,7 @@ def test_multiple_errors_with_instance(self):
path=["foobar", 2],
instance="i2"),
)
+ # Will raise an exception if the bug is still there.
exceptions.ErrorTree([e1, e2])
def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self):
@@ -277,6 +277,7 @@ def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self):
If a validator is dumb (like :validator:`required` in draft 3) and
refers to a path that isn't in the instance, the tree still properly
returns a subtree for that path.
+
"""
error = exceptions.ValidationError(
@@ -286,7 +287,7 @@ def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self):
self.assertIsInstance(tree["foo"], exceptions.ErrorTree)
-class TestErrorInitReprStr(TestCase):
+class TestErrorInitReprStr(unittest.TestCase):
def make_error(self, **kwargs):
defaults = dict(
message=u"hello",
@@ -299,7 +300,7 @@ def make_error(self, **kwargs):
return exceptions.ValidationError(**defaults)
def assertShows(self, expected, **kwargs):
- if PY3: # pragma: no cover
+ if PY3:
expected = expected.replace("u'", "'")
expected = textwrap.dedent(expected).rstrip("\n")
@@ -375,77 +376,19 @@ def test_multiple_item_paths(self):
)
def test_uses_pprint(self):
- self.assertShows(
- """
- Failed validating u'maxLength' in schema:
- {0: 0,
- 1: 1,
- 2: 2,
- 3: 3,
- 4: 4,
- 5: 5,
- 6: 6,
- 7: 7,
- 8: 8,
- 9: 9,
- 10: 10,
- 11: 11,
- 12: 12,
- 13: 13,
- 14: 14,
- 15: 15,
- 16: 16,
- 17: 17,
- 18: 18,
- 19: 19}
-
- On instance:
- [0,
- 1,
- 2,
- 3,
- 4,
- 5,
- 6,
- 7,
- 8,
- 9,
- 10,
- 11,
- 12,
- 13,
- 14,
- 15,
- 16,
- 17,
- 18,
- 19,
- 20,
- 21,
- 22,
- 23,
- 24]
- """,
- instance=list(range(25)),
- schema=dict(zip(range(20), range(20))),
- validator=u"maxLength",
- )
+ with mock.patch("pprint.pformat") as pformat:
+ str(self.make_error())
+ self.assertEqual(pformat.call_count, 2) # schema + instance
def test_str_works_with_instances_having_overriden_eq_operator(self):
"""
Check for https://github.com/Julian/jsonschema/issues/164 which
rendered exceptions unusable when a `ValidationError` involved
instances with an `__eq__` method that returned truthy values.
- """
- class DontEQMeBro(object):
- def __eq__(this, other): # pragma: no cover
- self.fail("Don't!")
-
- def __ne__(this, other): # pragma: no cover
- self.fail("Don't!")
+ """
- instance = DontEQMeBro()
+ instance = mock.MagicMock()
error = exceptions.ValidationError(
"a message",
validator="foo",
@@ -453,10 +396,5 @@ def __ne__(this, other): # pragma: no cover
validator_value="some",
schema="schema",
)
- self.assertIn(repr(instance), str(error))
-
-
-class TestHashable(TestCase):
- def test_hashable(self):
- set([exceptions.ValidationError("")])
- set([exceptions.SchemaError("")])
+ str(error)
+ self.assertFalse(instance.__eq__.called)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_format.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_format.py
old mode 100644
new mode 100755
index 254985f6..ee49e2ff
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_format.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_format.py
@@ -1,24 +1,18 @@
"""
Tests for the parts of jsonschema related to the :validator:`format` property.
+
"""
-from unittest import TestCase
+from jsonschema.tests.compat import mock, unittest
from jsonschema import FormatError, ValidationError, FormatChecker
from jsonschema.validators import Draft4Validator
-BOOM = ValueError("Boom!")
-BANG = ZeroDivisionError("Bang!")
-
-
-def boom(thing):
- if thing == "bang":
- raise BANG
- raise BOOM
-
+class TestFormatChecker(unittest.TestCase):
+ def setUp(self):
+ self.fn = mock.Mock()
-class TestFormatChecker(TestCase):
def test_it_can_validate_no_formats(self):
checker = FormatChecker(formats=())
self.assertFalse(checker.checkers)
@@ -28,62 +22,42 @@ def test_it_raises_a_key_error_for_unknown_formats(self):
FormatChecker(formats=["o noes"])
def test_it_can_register_cls_checkers(self):
- original = dict(FormatChecker.checkers)
- self.addCleanup(FormatChecker.checkers.pop, "boom")
- FormatChecker.cls_checks("boom")(boom)
- self.assertEqual(
- FormatChecker.checkers,
- dict(original, boom=(boom, ())),
- )
+ with mock.patch.dict(FormatChecker.checkers, clear=True):
+ FormatChecker.cls_checks("new")(self.fn)
+ self.assertEqual(FormatChecker.checkers, {"new": (self.fn, ())})
def test_it_can_register_checkers(self):
checker = FormatChecker()
- checker.checks("boom")(boom)
+ checker.checks("new")(self.fn)
self.assertEqual(
checker.checkers,
- dict(FormatChecker.checkers, boom=(boom, ()))
+ dict(FormatChecker.checkers, new=(self.fn, ()))
)
def test_it_catches_registered_errors(self):
checker = FormatChecker()
- checker.checks("boom", raises=type(BOOM))(boom)
+ cause = self.fn.side_effect = ValueError()
+
+ checker.checks("foo", raises=ValueError)(self.fn)
with self.assertRaises(FormatError) as cm:
- checker.check(instance=12, format="boom")
+ checker.check("bar", "foo")
- self.assertIs(cm.exception.cause, BOOM)
- self.assertIs(cm.exception.__cause__, BOOM)
+ self.assertIs(cm.exception.cause, cause)
+ self.assertIs(cm.exception.__cause__, cause)
# Unregistered errors should not be caught
- with self.assertRaises(type(BANG)):
- checker.check(instance="bang", format="boom")
+ self.fn.side_effect = AttributeError
+ with self.assertRaises(AttributeError):
+ checker.check("bar", "foo")
def test_format_error_causes_become_validation_error_causes(self):
checker = FormatChecker()
- checker.checks("boom", raises=ValueError)(boom)
- validator = Draft4Validator({"format": "boom"}, format_checker=checker)
+ checker.checks("foo", raises=ValueError)(self.fn)
+ cause = self.fn.side_effect = ValueError()
+ validator = Draft4Validator({"format": "foo"}, format_checker=checker)
with self.assertRaises(ValidationError) as cm:
- validator.validate("BOOM")
+ validator.validate("bar")
- self.assertIs(cm.exception.cause, BOOM)
- self.assertIs(cm.exception.__cause__, BOOM)
-
- def test_format_checkers_come_with_defaults(self):
- # This is bad :/ but relied upon.
- # The docs for quite awhile recommended people do things like
- # validate(..., format_checker=FormatChecker())
- # We should change that, but we can't without deprecation...
- checker = FormatChecker()
- with self.assertRaises(FormatError):
- checker.check(instance="not-an-ipv4", format="ipv4")
-
- def test_repr(self):
- checker = FormatChecker(formats=())
- checker.checks("foo")(lambda thing: True)
- checker.checks("bar")(lambda thing: True)
- checker.checks("baz")(lambda thing: True)
- self.assertEqual(
- repr(checker),
-            "<FormatChecker checkers=['bar', 'baz', 'foo']>",
- )
+ self.assertIs(cm.exception.__cause__, cause)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_jsonschema_test_suite.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_jsonschema_test_suite.py
old mode 100644
new mode 100755
index ebccf297..a45dcdd1
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_jsonschema_test_suite.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_jsonschema_test_suite.py
@@ -3,275 +3,288 @@
Tests comprehensive correctness of each draft's validator.
-See https://github.com/json-schema-org/JSON-Schema-Test-Suite for details.
+See https://github.com/json-schema/JSON-Schema-Test-Suite for details.
+
"""
+from contextlib import closing
+from decimal import Decimal
+import glob
+import json
+import io
+import itertools
+import os
+import re
+import subprocess
import sys
-import warnings
+
+try:
+ from sys import pypy_version_info
+except ImportError:
+ pypy_version_info = None
from jsonschema import (
- Draft3Validator,
- Draft4Validator,
- Draft6Validator,
- Draft7Validator,
- draft3_format_checker,
- draft4_format_checker,
- draft6_format_checker,
- draft7_format_checker,
+ FormatError, SchemaError, ValidationError, Draft3Validator,
+ Draft4Validator, FormatChecker, draft3_format_checker,
+ draft4_format_checker, validate,
)
-from jsonschema.tests._helpers import bug
-from jsonschema.tests._suite import Suite
-from jsonschema.validators import _DEPRECATED_DEFAULT_TYPES, create
+from jsonschema.compat import PY3
+from jsonschema.tests.compat import mock, unittest
+import jsonschema
-SUITE = Suite()
-DRAFT3 = SUITE.version(name="draft3")
-DRAFT4 = SUITE.version(name="draft4")
-DRAFT6 = SUITE.version(name="draft6")
-DRAFT7 = SUITE.version(name="draft7")
+REPO_ROOT = os.path.join(os.path.dirname(jsonschema.__file__), os.path.pardir)
+SUITE = os.getenv("JSON_SCHEMA_TEST_SUITE", os.path.join(REPO_ROOT, "json"))
+if not os.path.isdir(SUITE):
+ raise ValueError(
+ "Can't find the JSON-Schema-Test-Suite directory. Set the "
+ "'JSON_SCHEMA_TEST_SUITE' environment variable or run the tests from "
+ "alongside a checkout of the suite."
+ )
-def skip(message, **kwargs):
- def skipper(test):
- if all(value == getattr(test, attr) for attr, value in kwargs.items()):
- return message
- return skipper
+TESTS_DIR = os.path.join(SUITE, "tests")
+JSONSCHEMA_SUITE = os.path.join(SUITE, "bin", "jsonschema_suite")
+remotes_stdout = subprocess.Popen(
+ ["python", JSONSCHEMA_SUITE, "remotes"], stdout=subprocess.PIPE,
+).stdout
+
+with closing(remotes_stdout):
+ if PY3:
+ remotes_stdout = io.TextIOWrapper(remotes_stdout)
+ REMOTES = json.load(remotes_stdout)
+
+
+def make_case(schema, data, valid, name):
+ if valid:
+ def test_case(self):
+ kwargs = getattr(self, "validator_kwargs", {})
+ validate(data, schema, cls=self.validator_class, **kwargs)
+ else:
+ def test_case(self):
+ kwargs = getattr(self, "validator_kwargs", {})
+ with self.assertRaises(ValidationError):
+ validate(data, schema, cls=self.validator_class, **kwargs)
+
+ if not PY3:
+ name = name.encode("utf-8")
+ test_case.__name__ = name
+
+ return test_case
+
+
+def maybe_skip(skip, test_case, case, test):
+ if skip is not None:
+ reason = skip(case, test)
+ if reason is not None:
+ test_case = unittest.skip(reason)(test_case)
+ return test_case
+
+
+def load_json_cases(tests_glob, ignore_glob="", basedir=TESTS_DIR, skip=None):
+ if ignore_glob:
+ ignore_glob = os.path.join(basedir, ignore_glob)
+
+ def add_test_methods(test_class):
+ ignored = set(glob.iglob(ignore_glob))
+
+ for filename in glob.iglob(os.path.join(basedir, tests_glob)):
+ if filename in ignored:
+ continue
+
+ validating, _ = os.path.splitext(os.path.basename(filename))
+ id = itertools.count(1)
+
+ with open(filename) as test_file:
+ for case in json.load(test_file):
+ for test in case["tests"]:
+ name = "test_%s_%s_%s" % (
+ validating,
+ next(id),
+ re.sub(r"[\W ]+", "_", test["description"]),
+ )
+ assert not hasattr(test_class, name), name
+
+ test_case = make_case(
+ data=test["data"],
+ schema=case["schema"],
+ valid=test["valid"],
+ name=name,
+ )
+ test_case = maybe_skip(skip, test_case, case, test)
+ setattr(test_class, name, test_case)
+
+ return test_class
+ return add_test_methods
+
+
+class TypesMixin(object):
+ @unittest.skipIf(PY3, "In Python 3 json.load always produces unicode")
+ def test_string_a_bytestring_is_a_string(self):
+ self.validator_class({"type": "string"}).validate(b"foo")
+
+
+class DecimalMixin(object):
+ def test_it_can_validate_with_decimals(self):
+ schema = {"type": "number"}
+ validator = self.validator_class(
+ schema, types={"number": (int, float, Decimal)}
+ )
+
+ for valid in [1, 1.1, Decimal(1) / Decimal(8)]:
+ validator.validate(valid)
+
+ for invalid in ["foo", {}, [], True, None]:
+ with self.assertRaises(ValidationError):
+ validator.validate(invalid)
-def missing_format(checker):
- def missing_format(test):
- schema = test.schema
- if schema is True or schema is False or "format" not in schema:
- return
- if schema["format"] not in checker.checkers:
- return "Format checker {0!r} not found.".format(schema["format"])
+def missing_format(checker):
+ def missing_format(case, test):
+ format = case["schema"].get("format")
+ if format not in checker.checkers:
+ return "Format checker {0!r} not found.".format(format)
+ elif (
+ format == "date-time" and
+ pypy_version_info is not None and
+ pypy_version_info[:2] <= (1, 9)
+ ):
+ # datetime.datetime is overzealous about typechecking in <=1.9
+ return "datetime.datetime is broken on this version of PyPy."
return missing_format
-is_narrow_build = sys.maxunicode == 2 ** 16 - 1
-if is_narrow_build: # pragma: no cover
- message = "Not running surrogate Unicode case, this Python is narrow."
+class FormatMixin(object):
+ def test_it_returns_true_for_formats_it_does_not_know_about(self):
+ validator = self.validator_class(
+ {"format": "carrot"}, format_checker=FormatChecker(),
+ )
+ validator.validate("bugs")
+
+ def test_it_does_not_validate_formats_by_default(self):
+ validator = self.validator_class({})
+ self.assertIsNone(validator.format_checker)
+
+ def test_it_validates_formats_if_a_checker_is_provided(self):
+ checker = mock.Mock(spec=FormatChecker)
+ validator = self.validator_class(
+ {"format": "foo"}, format_checker=checker,
+ )
+
+ validator.validate("bar")
+
+ checker.check.assert_called_once_with("bar", "foo")
+
+ cause = ValueError()
+ checker.check.side_effect = FormatError('aoeu', cause=cause)
+
+ with self.assertRaises(ValidationError) as cm:
+ validator.validate("bar")
+ # Make sure original cause is attached
+ self.assertIs(cm.exception.cause, cause)
+
+ def test_it_validates_formats_of_any_type(self):
+ checker = mock.Mock(spec=FormatChecker)
+ validator = self.validator_class(
+ {"format": "foo"}, format_checker=checker,
+ )
+
+ validator.validate([1, 2, 3])
+
+ checker.check.assert_called_once_with([1, 2, 3], "foo")
+
+ cause = ValueError()
+ checker.check.side_effect = FormatError('aoeu', cause=cause)
+
+ with self.assertRaises(ValidationError) as cm:
+ validator.validate([1, 2, 3])
+ # Make sure original cause is attached
+ self.assertIs(cm.exception.cause, cause)
- def narrow_unicode_build(test): # pragma: no cover
- return skip(
- message=message,
- description="one supplementary Unicode code point is not long enough",
- )(test) or skip(
- message=message,
- description="two supplementary Unicode code points is long enough",
- )(test)
+
+if sys.maxunicode == 2 ** 16 - 1: # This is a narrow build.
+ def narrow_unicode_build(case, test):
+ if "supplementary Unicode" in test["description"]:
+ return "Not running surrogate Unicode case, this Python is narrow."
else:
- def narrow_unicode_build(test): # pragma: no cover
+ def narrow_unicode_build(case, test): # This isn't, skip nothing.
return
-TestDraft3 = DRAFT3.to_unittest_testcase(
- DRAFT3.tests(),
- DRAFT3.optional_tests_of(name="bignum"),
- DRAFT3.optional_tests_of(name="format"),
- DRAFT3.optional_tests_of(name="zeroTerminatedFloats"),
- Validator=Draft3Validator,
- format_checker=draft3_format_checker,
- skip=lambda test: (
- narrow_unicode_build(test)
- or missing_format(draft3_format_checker)(test)
- or skip(
- message="Upstream bug in strict_rfc3339",
- subject="format",
- description="case-insensitive T and Z",
- )(test)
- ),
+@load_json_cases(
+ "draft3/*.json",
+ skip=narrow_unicode_build,
+ ignore_glob="draft3/refRemote.json",
+)
+@load_json_cases(
+ "draft3/optional/format.json", skip=missing_format(draft3_format_checker)
)
+@load_json_cases("draft3/optional/bignum.json")
+@load_json_cases("draft3/optional/zeroTerminatedFloats.json")
+class TestDraft3(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
+ validator_class = Draft3Validator
+ validator_kwargs = {"format_checker": draft3_format_checker}
+ def test_any_type_is_valid_for_type_any(self):
+ validator = self.validator_class({"type": "any"})
+ validator.validate(mock.Mock())
-TestDraft4 = DRAFT4.to_unittest_testcase(
- DRAFT4.tests(),
- DRAFT4.optional_tests_of(name="bignum"),
- DRAFT4.optional_tests_of(name="format"),
- DRAFT4.optional_tests_of(name="zeroTerminatedFloats"),
- Validator=Draft4Validator,
- format_checker=draft4_format_checker,
- skip=lambda test: (
- narrow_unicode_build(test)
- or missing_format(draft4_format_checker)(test)
- or skip(
- message=bug(),
- subject="ref",
- case_description="Recursive references between schemas",
- )(test)
- or skip(
- message=bug(371),
- subject="ref",
- case_description="Location-independent identifier",
- )(test)
- or skip(
- message=bug(371),
- subject="ref",
- case_description=(
- "Location-independent identifier with absolute URI"
- ),
- )(test)
- or skip(
- message=bug(371),
- subject="ref",
- case_description=(
- "Location-independent identifier with base URI change in subschema"
- ),
- )(test)
- or skip(
- message=bug(),
- subject="refRemote",
- case_description="base URI change - change folder in subschema",
- )(test)
- or skip(
- message="Upstream bug in strict_rfc3339",
- subject="format",
- description="case-insensitive T and Z",
- )(test)
- ),
-)
+ # TODO: we're in need of more meta schema tests
+ def test_invalid_properties(self):
+ with self.assertRaises(SchemaError):
+ validate({}, {"properties": {"test": True}},
+ cls=self.validator_class)
+ def test_minItems_invalid_string(self):
+ with self.assertRaises(SchemaError):
+ # needs to be an integer
+ validate([1], {"minItems": "1"}, cls=self.validator_class)
-TestDraft6 = DRAFT6.to_unittest_testcase(
- DRAFT6.tests(),
- DRAFT6.optional_tests_of(name="bignum"),
- DRAFT6.optional_tests_of(name="format"),
- DRAFT6.optional_tests_of(name="zeroTerminatedFloats"),
- Validator=Draft6Validator,
- format_checker=draft6_format_checker,
- skip=lambda test: (
- narrow_unicode_build(test)
- or missing_format(draft6_format_checker)(test)
- or skip(
- message=bug(),
- subject="ref",
- case_description="Recursive references between schemas",
- )(test)
- or skip(
- message=bug(371),
- subject="ref",
- case_description="Location-independent identifier",
- )(test)
- or skip(
- message=bug(371),
- subject="ref",
- case_description=(
- "Location-independent identifier with absolute URI"
- ),
- )(test)
- or skip(
- message=bug(371),
- subject="ref",
- case_description=(
- "Location-independent identifier with base URI change in subschema"
- ),
- )(test)
- or skip(
- message=bug(),
- subject="refRemote",
- case_description="base URI change - change folder in subschema",
- )(test)
- or skip(
- message="Upstream bug in strict_rfc3339",
- subject="format",
- description="case-insensitive T and Z",
- )(test)
- ),
+
+@load_json_cases(
+ "draft4/*.json",
+ skip=narrow_unicode_build,
+ ignore_glob="draft4/refRemote.json",
+)
+@load_json_cases(
+ "draft4/optional/format.json", skip=missing_format(draft4_format_checker)
)
+@load_json_cases("draft4/optional/bignum.json")
+@load_json_cases("draft4/optional/zeroTerminatedFloats.json")
+class TestDraft4(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
+ validator_class = Draft4Validator
+ validator_kwargs = {"format_checker": draft4_format_checker}
+ # TODO: we're in need of more meta schema tests
+ def test_invalid_properties(self):
+ with self.assertRaises(SchemaError):
+ validate({}, {"properties": {"test": True}},
+ cls=self.validator_class)
-TestDraft7 = DRAFT7.to_unittest_testcase(
- DRAFT7.tests(),
- DRAFT7.format_tests(),
- DRAFT7.optional_tests_of(name="bignum"),
- DRAFT7.optional_tests_of(name="content"),
- DRAFT7.optional_tests_of(name="zeroTerminatedFloats"),
- Validator=Draft7Validator,
- format_checker=draft7_format_checker,
- skip=lambda test: (
- narrow_unicode_build(test)
- or missing_format(draft7_format_checker)(test)
- or skip(
- message=bug(),
- subject="ref",
- case_description="Recursive references between schemas",
- )(test)
- or skip(
- message=bug(371),
- subject="ref",
- case_description="Location-independent identifier",
- )(test)
- or skip(
- message=bug(371),
- subject="ref",
- case_description=(
- "Location-independent identifier with absolute URI"
- ),
- )(test)
- or skip(
- message=bug(371),
- subject="ref",
- case_description=(
- "Location-independent identifier with base URI change in subschema"
- ),
- )(test)
- or skip(
- message=bug(),
- subject="refRemote",
- case_description="base URI change - change folder in subschema",
- )(test)
- or skip(
- message="Upstream bug in strict_rfc3339",
- subject="date-time",
- description="case-insensitive T and Z",
- )(test)
- or skip(
- message=bug(593),
- subject="content",
- case_description=(
- "validation of string-encoded content based on media type"
- ),
- )(test)
- or skip(
- message=bug(593),
- subject="content",
- case_description="validation of binary string-encoding",
- )(test)
- or skip(
- message=bug(593),
- subject="content",
- case_description=(
- "validation of binary-encoded media type documents"
- ),
- )(test)
- ),
-)
+ def test_minItems_invalid_string(self):
+ with self.assertRaises(SchemaError):
+ # needs to be an integer
+ validate([1], {"minItems": "1"}, cls=self.validator_class)
-with warnings.catch_warnings():
- warnings.simplefilter("ignore", DeprecationWarning)
-
- TestDraft3LegacyTypeCheck = DRAFT3.to_unittest_testcase(
- # Interestingly the any part couldn't really be done w/the old API.
- (
- (test for test in each if test.schema != {"type": "any"})
- for each in DRAFT3.tests_of(name="type")
- ),
- name="TestDraft3LegacyTypeCheck",
- Validator=create(
- meta_schema=Draft3Validator.META_SCHEMA,
- validators=Draft3Validator.VALIDATORS,
- default_types=_DEPRECATED_DEFAULT_TYPES,
- ),
- )
+class RemoteRefResolutionMixin(object):
+ def setUp(self):
+ patch = mock.patch("jsonschema.validators.requests")
+ requests = patch.start()
+ requests.get.side_effect = self.resolve
+ self.addCleanup(patch.stop)
- TestDraft4LegacyTypeCheck = DRAFT4.to_unittest_testcase(
- DRAFT4.tests_of(name="type"),
- name="TestDraft4LegacyTypeCheck",
- Validator=create(
- meta_schema=Draft4Validator.META_SCHEMA,
- validators=Draft4Validator.VALIDATORS,
- default_types=_DEPRECATED_DEFAULT_TYPES,
- ),
- )
+ def resolve(self, reference):
+ _, _, reference = reference.partition("http://localhost:1234/")
+ return mock.Mock(**{"json.return_value": REMOTES.get(reference)})
+
+
+@load_json_cases("draft3/refRemote.json")
+class Draft3RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
+ validator_class = Draft3Validator
+
+
+@load_json_cases("draft4/refRemote.json")
+class Draft4RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
+ validator_class = Draft4Validator
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_types.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_types.py
deleted file mode 100644
index 2280cc39..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_types.py
+++ /dev/null
@@ -1,190 +0,0 @@
-"""
-Tests on the new type interface. The actual correctness of the type checking
-is handled in test_jsonschema_test_suite; these tests check that TypeChecker
-functions correctly and can facilitate extensions to type checking
-"""
-from collections import namedtuple
-from unittest import TestCase
-
-from jsonschema import ValidationError, _validators
-from jsonschema._types import TypeChecker
-from jsonschema.exceptions import UndefinedTypeCheck
-from jsonschema.validators import Draft4Validator, extend
-
-
-def equals_2(checker, instance):
- return instance == 2
-
-
-def is_namedtuple(instance):
- return isinstance(instance, tuple) and getattr(instance, "_fields", None)
-
-
-def is_object_or_named_tuple(checker, instance):
- if Draft4Validator.TYPE_CHECKER.is_type(instance, "object"):
- return True
- return is_namedtuple(instance)
-
-
-def coerce_named_tuple(fn):
- def coerced(validator, value, instance, schema):
- if is_namedtuple(instance):
- instance = instance._asdict()
- return fn(validator, value, instance, schema)
- return coerced
-
-
-required = coerce_named_tuple(_validators.required)
-properties = coerce_named_tuple(_validators.properties)
-
-
-class TestTypeChecker(TestCase):
- def test_is_type(self):
- checker = TypeChecker({"two": equals_2})
- self.assertEqual(
- (
- checker.is_type(instance=2, type="two"),
- checker.is_type(instance="bar", type="two"),
- ),
- (True, False),
- )
-
- def test_is_unknown_type(self):
- with self.assertRaises(UndefinedTypeCheck) as context:
- TypeChecker().is_type(4, "foobar")
- self.assertIn("foobar", str(context.exception))
-
- def test_checks_can_be_added_at_init(self):
- checker = TypeChecker({"two": equals_2})
- self.assertEqual(checker, TypeChecker().redefine("two", equals_2))
-
- def test_redefine_existing_type(self):
- self.assertEqual(
- TypeChecker().redefine("two", object()).redefine("two", equals_2),
- TypeChecker().redefine("two", equals_2),
- )
-
- def test_remove(self):
- self.assertEqual(
- TypeChecker({"two": equals_2}).remove("two"),
- TypeChecker(),
- )
-
- def test_remove_unknown_type(self):
- with self.assertRaises(UndefinedTypeCheck) as context:
- TypeChecker().remove("foobar")
- self.assertIn("foobar", str(context.exception))
-
- def test_redefine_many(self):
- self.assertEqual(
- TypeChecker().redefine_many({"foo": int, "bar": str}),
- TypeChecker().redefine("foo", int).redefine("bar", str),
- )
-
- def test_remove_multiple(self):
- self.assertEqual(
- TypeChecker({"foo": int, "bar": str}).remove("foo", "bar"),
- TypeChecker(),
- )
-
- def test_type_check_can_raise_key_error(self):
- """
- Make sure no one writes:
-
- try:
- self._type_checkers[type](...)
- except KeyError:
-
- ignoring the fact that the function itself can raise that.
- """
-
- error = KeyError("Stuff")
-
- def raises_keyerror(checker, instance):
- raise error
-
- with self.assertRaises(KeyError) as context:
- TypeChecker({"foo": raises_keyerror}).is_type(4, "foo")
-
- self.assertIs(context.exception, error)
-
-
-class TestCustomTypes(TestCase):
- def test_simple_type_can_be_extended(self):
- def int_or_str_int(checker, instance):
- if not isinstance(instance, (int, str)):
- return False
- try:
- int(instance)
- except ValueError:
- return False
- return True
-
- CustomValidator = extend(
- Draft4Validator,
- type_checker=Draft4Validator.TYPE_CHECKER.redefine(
- "integer", int_or_str_int,
- ),
- )
- validator = CustomValidator({"type": "integer"})
-
- validator.validate(4)
- validator.validate("4")
-
- with self.assertRaises(ValidationError):
- validator.validate(4.4)
-
- def test_object_can_be_extended(self):
- schema = {"type": "object"}
-
- Point = namedtuple("Point", ["x", "y"])
-
- type_checker = Draft4Validator.TYPE_CHECKER.redefine(
- u"object", is_object_or_named_tuple,
- )
-
- CustomValidator = extend(Draft4Validator, type_checker=type_checker)
- validator = CustomValidator(schema)
-
- validator.validate(Point(x=4, y=5))
-
- def test_object_extensions_require_custom_validators(self):
- schema = {"type": "object", "required": ["x"]}
-
- type_checker = Draft4Validator.TYPE_CHECKER.redefine(
- u"object", is_object_or_named_tuple,
- )
-
- CustomValidator = extend(Draft4Validator, type_checker=type_checker)
- validator = CustomValidator(schema)
-
- Point = namedtuple("Point", ["x", "y"])
- # Cannot handle required
- with self.assertRaises(ValidationError):
- validator.validate(Point(x=4, y=5))
-
- def test_object_extensions_can_handle_custom_validators(self):
- schema = {
- "type": "object",
- "required": ["x"],
- "properties": {"x": {"type": "integer"}},
- }
-
- type_checker = Draft4Validator.TYPE_CHECKER.redefine(
- u"object", is_object_or_named_tuple,
- )
-
- CustomValidator = extend(
- Draft4Validator,
- type_checker=type_checker,
- validators={"required": required, "properties": properties},
- )
-
- validator = CustomValidator(schema)
-
- Point = namedtuple("Point", ["x", "y"])
- # Can now process required and properties
- validator.validate(Point(x=4, y=5))
-
- with self.assertRaises(ValidationError):
- validator.validate(Point(x="not an integer", y=5))
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_validators.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_validators.py
old mode 100644
new mode 100755
index 07be4f08..fb59b830
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_validators.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/tests/test_validators.py
@@ -1,404 +1,82 @@
from collections import deque
from contextlib import contextmanager
-from decimal import Decimal
-from io import BytesIO
-from unittest import TestCase
import json
-import os
-import sys
-import tempfile
-import unittest
-from twisted.trial.unittest import SynchronousTestCase
-import attr
+from jsonschema import FormatChecker, ValidationError
+from jsonschema.tests.compat import mock, unittest
+from jsonschema.validators import (
+ RefResolutionError, UnknownType, Draft3Validator,
+ Draft4Validator, RefResolver, create, extend, validator_for, validate,
+)
-from jsonschema import FormatChecker, TypeChecker, exceptions, validators
-from jsonschema.compat import PY3, pathname2url
-from jsonschema.tests._helpers import bug
-
-def startswith(validator, startswith, instance, schema):
- if not instance.startswith(startswith):
- yield exceptions.ValidationError(u"Whoops!")
-
-
-class TestCreateAndExtend(SynchronousTestCase):
+class TestCreateAndExtend(unittest.TestCase):
def setUp(self):
- self.addCleanup(
- self.assertEqual,
- validators.meta_schemas,
- dict(validators.meta_schemas),
- )
-
- self.meta_schema = {u"$id": "some://meta/schema"}
- self.validators = {u"startswith": startswith}
- self.type_checker = TypeChecker()
- self.Validator = validators.create(
+ self.meta_schema = {u"properties": {u"smelly": {}}}
+ self.smelly = mock.MagicMock()
+ self.validators = {u"smelly": self.smelly}
+ self.types = {u"dict": dict}
+ self.Validator = create(
meta_schema=self.meta_schema,
validators=self.validators,
- type_checker=self.type_checker,
+ default_types=self.types,
)
+ self.validator_value = 12
+ self.schema = {u"smelly": self.validator_value}
+ self.validator = self.Validator(self.schema)
+
def test_attrs(self):
- self.assertEqual(
- (
- self.Validator.VALIDATORS,
- self.Validator.META_SCHEMA,
- self.Validator.TYPE_CHECKER,
- ), (
- self.validators,
- self.meta_schema,
- self.type_checker,
- ),
- )
+ self.assertEqual(self.Validator.VALIDATORS, self.validators)
+ self.assertEqual(self.Validator.META_SCHEMA, self.meta_schema)
+ self.assertEqual(self.Validator.DEFAULT_TYPES, self.types)
def test_init(self):
- schema = {u"startswith": u"foo"}
- self.assertEqual(self.Validator(schema).schema, schema)
+ self.assertEqual(self.validator.schema, self.schema)
def test_iter_errors(self):
- schema = {u"startswith": u"hel"}
- iter_errors = self.Validator(schema).iter_errors
-
- errors = list(iter_errors(u"hello"))
- self.assertEqual(errors, [])
-
- expected_error = exceptions.ValidationError(
- u"Whoops!",
- instance=u"goodbye",
- schema=schema,
- validator=u"startswith",
- validator_value=u"hel",
- schema_path=deque([u"startswith"]),
- )
-
- errors = list(iter_errors(u"goodbye"))
- self.assertEqual(len(errors), 1)
- self.assertEqual(errors[0]._contents(), expected_error._contents())
-
- def test_if_a_version_is_provided_it_is_registered(self):
- Validator = validators.create(
- meta_schema={u"$id": "something"},
- version="my version",
- )
- self.addCleanup(validators.meta_schemas.pop, "something")
- self.assertEqual(Validator.__name__, "MyVersionValidator")
-
- def test_if_a_version_is_not_provided_it_is_not_registered(self):
- original = dict(validators.meta_schemas)
- validators.create(meta_schema={u"id": "id"})
- self.assertEqual(validators.meta_schemas, original)
-
- def test_validates_registers_meta_schema_id(self):
- meta_schema_key = "meta schema id"
- my_meta_schema = {u"id": meta_schema_key}
-
- validators.create(
- meta_schema=my_meta_schema,
- version="my version",
- id_of=lambda s: s.get("id", ""),
- )
- self.addCleanup(validators.meta_schemas.pop, meta_schema_key)
-
- self.assertIn(meta_schema_key, validators.meta_schemas)
+ instance = "hello"
- def test_validates_registers_meta_schema_draft6_id(self):
- meta_schema_key = "meta schema $id"
- my_meta_schema = {u"$id": meta_schema_key}
+ self.smelly.return_value = []
+ self.assertEqual(list(self.validator.iter_errors(instance)), [])
- validators.create(
- meta_schema=my_meta_schema,
- version="my version",
- )
- self.addCleanup(validators.meta_schemas.pop, meta_schema_key)
-
- self.assertIn(meta_schema_key, validators.meta_schemas)
-
- def test_create_default_types(self):
- Validator = validators.create(meta_schema={}, validators=())
- self.assertTrue(
- all(
- Validator({}).is_type(instance=instance, type=type)
- for type, instance in [
- (u"array", []),
- (u"boolean", True),
- (u"integer", 12),
- (u"null", None),
- (u"number", 12.0),
- (u"object", {}),
- (u"string", u"foo"),
- ]
- ),
- )
-
- def test_extend(self):
- original = dict(self.Validator.VALIDATORS)
- new = object()
-
- Extended = validators.extend(
- self.Validator,
- validators={u"new": new},
- )
- self.assertEqual(
- (
- Extended.VALIDATORS,
- Extended.META_SCHEMA,
- Extended.TYPE_CHECKER,
- self.Validator.VALIDATORS,
- ), (
- dict(original, new=new),
- self.Validator.META_SCHEMA,
- self.Validator.TYPE_CHECKER,
- original,
- ),
- )
+ error = mock.Mock()
+ self.smelly.return_value = [error]
+ self.assertEqual(list(self.validator.iter_errors(instance)), [error])
- def test_extend_idof(self):
- """
- Extending a validator preserves its notion of schema IDs.
- """
- def id_of(schema):
- return schema.get(u"__test__", self.Validator.ID_OF(schema))
- correct_id = "the://correct/id/"
- meta_schema = {
- u"$id": "the://wrong/id/",
- u"__test__": correct_id,
- }
- Original = validators.create(
- meta_schema=meta_schema,
- validators=self.validators,
- type_checker=self.type_checker,
- id_of=id_of,
+ self.smelly.assert_called_with(
+ self.validator, self.validator_value, instance, self.schema,
)
- self.assertEqual(Original.ID_OF(Original.META_SCHEMA), correct_id)
-
- Derived = validators.extend(Original)
- self.assertEqual(Derived.ID_OF(Derived.META_SCHEMA), correct_id)
+ def test_if_a_version_is_provided_it_is_registered(self):
+ with mock.patch("jsonschema.validators.validates") as validates:
+ validates.side_effect = lambda version: lambda cls: cls
+ Validator = create(meta_schema={u"id": ""}, version="my version")
+ validates.assert_called_once_with("my version")
+ self.assertEqual(Validator.__name__, "MyVersionValidator")
-class TestLegacyTypeChecking(SynchronousTestCase):
- def test_create_default_types(self):
- Validator = validators.create(meta_schema={}, validators=())
- self.assertEqual(
- set(Validator.DEFAULT_TYPES), {
- u"array",
- u"boolean",
- u"integer",
- u"null",
- u"number",
- u"object", u"string",
- },
- )
- self.flushWarnings()
+ def test_if_a_version_is_not_provided_it_is_not_registered(self):
+ with mock.patch("jsonschema.validators.validates") as validates:
+ create(meta_schema={u"id": "id"})
+ self.assertFalse(validates.called)
def test_extend(self):
- Validator = validators.create(meta_schema={}, validators=())
- original = dict(Validator.VALIDATORS)
- new = object()
-
- Extended = validators.extend(
- Validator,
- validators={u"new": new},
- )
- self.assertEqual(
- (
- Extended.VALIDATORS,
- Extended.META_SCHEMA,
- Extended.TYPE_CHECKER,
- Validator.VALIDATORS,
-
- Extended.DEFAULT_TYPES,
- Extended({}).DEFAULT_TYPES,
- self.flushWarnings()[0]["message"],
- ), (
- dict(original, new=new),
- Validator.META_SCHEMA,
- Validator.TYPE_CHECKER,
- original,
-
- Validator.DEFAULT_TYPES,
- Validator.DEFAULT_TYPES,
- self.flushWarnings()[0]["message"],
- ),
- )
-
- def test_types_redefines_the_validators_type_checker(self):
- schema = {"type": "string"}
- self.assertFalse(validators.Draft7Validator(schema).is_valid(12))
-
- validator = validators.Draft7Validator(
- schema,
- types={"string": (str, int)},
- )
- self.assertTrue(validator.is_valid(12))
- self.flushWarnings()
-
- def test_providing_default_types_warns(self):
- self.assertWarns(
- category=DeprecationWarning,
- message=(
- "The default_types argument is deprecated. "
- "Use the type_checker argument instead."
- ),
- # https://tm.tl/9363 :'(
- filename=sys.modules[self.assertWarns.__module__].__file__,
-
- f=validators.create,
- meta_schema={},
- validators={},
- default_types={"foo": object},
- )
-
- def test_cannot_ask_for_default_types_with_non_default_type_checker(self):
- """
- We raise an error when you ask a validator with non-default
- type checker for its DEFAULT_TYPES.
-
- The type checker argument is new, so no one but this library
- itself should be trying to use it, and doing so while then
- asking for DEFAULT_TYPES makes no sense (not to mention is
- deprecated), since type checkers are not strictly about Python
- type.
- """
- Validator = validators.create(
- meta_schema={},
- validators={},
- type_checker=TypeChecker(),
- )
- with self.assertRaises(validators._DontDoThat) as e:
- Validator.DEFAULT_TYPES
-
- self.assertIn(
- "DEFAULT_TYPES cannot be used on Validators using TypeCheckers",
- str(e.exception),
- )
- with self.assertRaises(validators._DontDoThat):
- Validator({}).DEFAULT_TYPES
-
- self.assertFalse(self.flushWarnings())
-
- def test_providing_explicit_type_checker_does_not_warn(self):
- Validator = validators.create(
- meta_schema={},
- validators={},
- type_checker=TypeChecker(),
- )
- self.assertFalse(self.flushWarnings())
-
- Validator({})
- self.assertFalse(self.flushWarnings())
-
- def test_providing_neither_does_not_warn(self):
- Validator = validators.create(meta_schema={}, validators={})
- self.assertFalse(self.flushWarnings())
-
- Validator({})
- self.assertFalse(self.flushWarnings())
-
- def test_providing_default_types_with_type_checker_errors(self):
- with self.assertRaises(TypeError) as e:
- validators.create(
- meta_schema={},
- validators={},
- default_types={"foo": object},
- type_checker=TypeChecker(),
- )
-
- self.assertIn(
- "Do not specify default_types when providing a type checker",
- str(e.exception),
- )
- self.assertFalse(self.flushWarnings())
-
- def test_extending_a_legacy_validator_with_a_type_checker_errors(self):
- Validator = validators.create(
- meta_schema={},
- validators={},
- default_types={u"array": list}
- )
- with self.assertRaises(TypeError) as e:
- validators.extend(
- Validator,
- validators={},
- type_checker=TypeChecker(),
- )
-
- self.assertIn(
- (
- "Cannot extend a validator created with default_types "
- "with a type_checker. Update the validator to use a "
- "type_checker when created."
- ),
- str(e.exception),
- )
- self.flushWarnings()
-
- def test_extending_a_legacy_validator_does_not_rewarn(self):
- Validator = validators.create(meta_schema={}, default_types={})
- self.assertTrue(self.flushWarnings())
-
- validators.extend(Validator)
- self.assertFalse(self.flushWarnings())
-
- def test_accessing_default_types_warns(self):
- Validator = validators.create(meta_schema={}, validators={})
- self.assertFalse(self.flushWarnings())
-
- self.assertWarns(
- DeprecationWarning,
- (
- "The DEFAULT_TYPES attribute is deprecated. "
- "See the type checker attached to this validator instead."
- ),
- # https://tm.tl/9363 :'(
- sys.modules[self.assertWarns.__module__].__file__,
-
- getattr,
- Validator,
- "DEFAULT_TYPES",
- )
+ validators = dict(self.Validator.VALIDATORS)
+ new = mock.Mock()
- def test_accessing_default_types_on_the_instance_warns(self):
- Validator = validators.create(meta_schema={}, validators={})
- self.assertFalse(self.flushWarnings())
+ Extended = extend(self.Validator, validators={u"a new one": new})
- self.assertWarns(
- DeprecationWarning,
- (
- "The DEFAULT_TYPES attribute is deprecated. "
- "See the type checker attached to this validator instead."
- ),
- # https://tm.tl/9363 :'(
- sys.modules[self.assertWarns.__module__].__file__,
+ validators.update([(u"a new one", new)])
+ self.assertEqual(Extended.VALIDATORS, validators)
+ self.assertNotIn(u"a new one", self.Validator.VALIDATORS)
- getattr,
- Validator({}),
- "DEFAULT_TYPES",
- )
+ self.assertEqual(Extended.META_SCHEMA, self.Validator.META_SCHEMA)
+ self.assertEqual(Extended.DEFAULT_TYPES, self.Validator.DEFAULT_TYPES)
- def test_providing_types_to_init_warns(self):
- Validator = validators.create(meta_schema={}, validators={})
- self.assertFalse(self.flushWarnings())
- self.assertWarns(
- category=DeprecationWarning,
- message=(
- "The types argument is deprecated. "
- "Provide a type_checker to jsonschema.validators.extend "
- "instead."
- ),
- # https://tm.tl/9363 :'(
- filename=sys.modules[self.assertWarns.__module__].__file__,
-
- f=Validator,
- schema={},
- types={"bar": object},
- )
-
-
-class TestIterErrors(TestCase):
+class TestIterErrors(unittest.TestCase):
def setUp(self):
- self.validator = validators.Draft3Validator({})
+ self.validator = Draft3Validator({})
def test_iter_errors(self):
instance = [1, 2]
@@ -430,11 +108,11 @@ def test_iter_errors_multiple_failures_one_validator(self):
self.assertEqual(len(errors), 4)
-class TestValidationErrorMessages(TestCase):
+class TestValidationErrorMessages(unittest.TestCase):
def message_for(self, instance, schema, *args, **kwargs):
- kwargs.setdefault("cls", validators.Draft3Validator)
- with self.assertRaises(exceptions.ValidationError) as e:
- validators.validate(instance, schema, *args, **kwargs)
+ kwargs.setdefault("cls", Draft3Validator)
+ with self.assertRaises(ValidationError) as e:
+ validate(instance, schema, *args, **kwargs)
return e.exception.message
def test_single_type_failure(self):
@@ -453,12 +131,13 @@ def test_multiple_type_failure(self):
def test_object_without_title_type_failure(self):
type = {u"type": [{u"minimum": 3}]}
message = self.message_for(instance=1, schema={u"type": [type]})
- self.assertEqual(message, "1 is less than the minimum of 3")
+ self.assertEqual(message, "1 is not of type %r" % (type,))
- def test_object_with_named_type_failure(self):
- schema = {u"type": [{u"name": "Foo", u"minimum": 3}]}
+ def test_object_with_name_type_failure(self):
+ name = "Foo"
+ schema = {u"type": [{u"name": name, u"minimum": 3}]}
message = self.message_for(instance=1, schema=schema)
- self.assertEqual(message, "1 is less than the minimum of 3")
+ self.assertEqual(message, "1 is not of type %r" % (name,))
def test_minimum(self):
message = self.message_for(instance=1, schema={"minimum": 2})
@@ -468,98 +147,45 @@ def test_maximum(self):
message = self.message_for(instance=1, schema={"maximum": 0})
self.assertEqual(message, "1 is greater than the maximum of 0")
- def test_dependencies_single_element(self):
+ def test_dependencies_failure_has_single_element_not_list(self):
depend, on = "bar", "foo"
schema = {u"dependencies": {depend: on}}
- message = self.message_for(
- instance={"bar": 2},
- schema=schema,
- cls=validators.Draft3Validator,
- )
- self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
-
- def test_dependencies_list_draft3(self):
- depend, on = "bar", "foo"
- schema = {u"dependencies": {depend: [on]}}
- message = self.message_for(
- instance={"bar": 2},
- schema=schema,
- cls=validators.Draft3Validator,
- )
- self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
-
- def test_dependencies_list_draft7(self):
- depend, on = "bar", "foo"
- schema = {u"dependencies": {depend: [on]}}
- message = self.message_for(
- instance={"bar": 2},
- schema=schema,
- cls=validators.Draft7Validator,
- )
+ message = self.message_for({"bar": 2}, schema)
self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
def test_additionalItems_single_failure(self):
message = self.message_for(
- instance=[2],
- schema={u"items": [], u"additionalItems": False},
+ [2], {u"items": [], u"additionalItems": False},
)
self.assertIn("(2 was unexpected)", message)
def test_additionalItems_multiple_failures(self):
message = self.message_for(
- instance=[1, 2, 3],
- schema={u"items": [], u"additionalItems": False}
+ [1, 2, 3], {u"items": [], u"additionalItems": False}
)
self.assertIn("(1, 2, 3 were unexpected)", message)
def test_additionalProperties_single_failure(self):
additional = "foo"
schema = {u"additionalProperties": False}
- message = self.message_for(instance={additional: 2}, schema=schema)
+ message = self.message_for({additional: 2}, schema)
self.assertIn("(%r was unexpected)" % (additional,), message)
def test_additionalProperties_multiple_failures(self):
schema = {u"additionalProperties": False}
- message = self.message_for(
- instance=dict.fromkeys(["foo", "bar"]),
- schema=schema,
- )
+ message = self.message_for(dict.fromkeys(["foo", "bar"]), schema)
self.assertIn(repr("foo"), message)
self.assertIn(repr("bar"), message)
self.assertIn("were unexpected)", message)
- def test_const(self):
- schema = {u"const": 12}
- message = self.message_for(
- instance={"foo": "bar"},
- schema=schema,
- cls=validators.Draft6Validator,
- )
- self.assertIn("12 was expected", message)
-
- def test_contains(self):
- schema = {u"contains": {u"const": 12}}
- message = self.message_for(
- instance=[2, {}, []],
- schema=schema,
- cls=validators.Draft6Validator,
- )
- self.assertIn(
- "None of [2, {}, []] are valid under the given schema",
- message,
- )
-
def test_invalid_format_default_message(self):
checker = FormatChecker(formats=())
- checker.checks(u"thing")(lambda value: False)
+ check_fn = mock.Mock(return_value=False)
+ checker.checks(u"thing")(check_fn)
schema = {u"format": u"thing"}
- message = self.message_for(
- instance="bla",
- schema=schema,
- format_checker=checker,
- )
+ message = self.message_for("bla", schema, format_checker=checker)
self.assertIn(repr("bla"), message)
self.assertIn(repr("thing"), message)
@@ -570,24 +196,18 @@ def test_additionalProperties_false_patternProperties(self):
u"additionalProperties": False,
u"patternProperties": {
u"^abc$": {u"type": u"string"},
- u"^def$": {u"type": u"string"},
+ u"^def$": {u"type": u"string"}
}}
- message = self.message_for(
- instance={u"zebra": 123},
- schema=schema,
- cls=validators.Draft4Validator,
- )
+ message = self.message_for({u"zebra": 123}, schema,
+ cls=Draft4Validator)
self.assertEqual(
message,
"{} does not match any of the regexes: {}, {}".format(
repr(u"zebra"), repr(u"^abc$"), repr(u"^def$"),
),
)
- message = self.message_for(
- instance={u"zebra": 123, u"fish": 456},
- schema=schema,
- cls=validators.Draft4Validator,
- )
+ message = self.message_for({u"zebra": 123, u"fish": 456}, schema,
+ cls=Draft4Validator)
self.assertEqual(
message,
"{}, {} do not match any of the regexes: {}, {}".format(
@@ -595,16 +215,8 @@ def test_additionalProperties_false_patternProperties(self):
),
)
- def test_False_schema(self):
- message = self.message_for(
- instance="something",
- schema=False,
- cls=validators.Draft7Validator,
- )
- self.assertIn("False schema does not allow 'something'", message)
-
-class TestValidationErrorDetails(TestCase):
+class TestValidationErrorDetails(unittest.TestCase):
# TODO: These really need unit tests for each individual validator, rather
# than just these higher level tests.
def test_anyOf(self):
@@ -616,7 +228,7 @@ def test_anyOf(self):
],
}
- validator = validators.Draft4Validator(schema)
+ validator = Draft4Validator(schema)
errors = list(validator.iter_errors(instance))
self.assertEqual(len(errors), 1)
e = errors[0]
@@ -685,7 +297,7 @@ def test_type(self):
],
}
- validator = validators.Draft3Validator(schema)
+ validator = Draft3Validator(schema)
errors = list(validator.iter_errors(instance))
self.assertEqual(len(errors), 1)
e = errors[0]
@@ -757,7 +369,7 @@ def test_single_nesting(self):
},
}
- validator = validators.Draft3Validator(schema)
+ validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2, e3, e4 = sorted_errors(errors)
@@ -800,7 +412,7 @@ def test_multiple_nesting(self):
},
}
- validator = validators.Draft3Validator(schema)
+ validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2, e3, e4, e5, e6 = sorted_errors(errors)
@@ -879,7 +491,7 @@ def test_recursive(self):
},
},
}
- validator = validators.Draft4Validator(schema)
+ validator = Draft4Validator(schema)
e, = validator.iter_errors(instance)
self.assertEqual(e.absolute_path, deque(["root"]))
@@ -937,7 +549,7 @@ def test_additionalProperties(self):
instance = {"bar": "bar", "foo": 2}
schema = {"additionalProperties": {"type": "integer", "minimum": 5}}
- validator = validators.Draft3Validator(schema)
+ validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
@@ -956,7 +568,7 @@ def test_patternProperties(self):
},
}
- validator = validators.Draft3Validator(schema)
+ validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
@@ -973,7 +585,7 @@ def test_additionalItems(self):
"additionalItems": {"type": "integer", "minimum": 5},
}
- validator = validators.Draft3Validator(schema)
+ validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
@@ -990,7 +602,7 @@ def test_additionalItems_with_items(self):
"additionalItems": {"type": "integer", "minimum": 5},
}
- validator = validators.Draft3Validator(schema)
+ validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
@@ -1000,160 +612,57 @@ def test_additionalItems_with_items(self):
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
- def test_propertyNames(self):
- instance = {"foo": 12}
- schema = {"propertyNames": {"not": {"const": "foo"}}}
-
- validator = validators.Draft7Validator(schema)
- error, = validator.iter_errors(instance)
-
- self.assertEqual(error.validator, "not")
- self.assertEqual(
- error.message,
- "%r is not allowed for %r" % ({"const": "foo"}, "foo"),
- )
- self.assertEqual(error.path, deque([]))
- self.assertEqual(error.schema_path, deque(["propertyNames", "not"]))
-
- def test_if_then(self):
- schema = {
- "if": {"const": 12},
- "then": {"const": 13},
- }
-
- validator = validators.Draft7Validator(schema)
- error, = validator.iter_errors(12)
-
- self.assertEqual(error.validator, "const")
- self.assertEqual(error.message, "13 was expected")
- self.assertEqual(error.path, deque([]))
- self.assertEqual(error.schema_path, deque(["if", "then", "const"]))
-
- def test_if_else(self):
- schema = {
- "if": {"const": 12},
- "else": {"const": 13},
- }
-
- validator = validators.Draft7Validator(schema)
- error, = validator.iter_errors(15)
-
- self.assertEqual(error.validator, "const")
- self.assertEqual(error.message, "13 was expected")
- self.assertEqual(error.path, deque([]))
- self.assertEqual(error.schema_path, deque(["if", "else", "const"]))
-
- def test_boolean_schema_False(self):
- validator = validators.Draft7Validator(False)
- error, = validator.iter_errors(12)
-
- self.assertEqual(
- (
- error.message,
- error.validator,
- error.validator_value,
- error.instance,
- error.schema,
- error.schema_path,
- ),
- (
- "False schema does not allow 12",
- None,
- None,
- 12,
- False,
- deque([]),
- ),
- )
-
- def test_ref(self):
- ref, schema = "someRef", {"additionalProperties": {"type": "integer"}}
- validator = validators.Draft7Validator(
- {"$ref": ref},
- resolver=validators.RefResolver("", {}, store={ref: schema}),
- )
- error, = validator.iter_errors({"foo": "notAnInteger"})
-
- self.assertEqual(
- (
- error.message,
- error.validator,
- error.validator_value,
- error.instance,
- error.absolute_path,
- error.schema,
- error.schema_path,
- ),
- (
- "'notAnInteger' is not of type 'integer'",
- "type",
- "integer",
- "notAnInteger",
- deque(["foo"]),
- {"type": "integer"},
- deque(["additionalProperties", "type"]),
- ),
- )
-
-
-class MetaSchemaTestsMixin(object):
- # TODO: These all belong upstream
- def test_invalid_properties(self):
- with self.assertRaises(exceptions.SchemaError):
- self.Validator.check_schema({"properties": {"test": object()}})
-
- def test_minItems_invalid_string(self):
- with self.assertRaises(exceptions.SchemaError):
- # needs to be an integer
- self.Validator.check_schema({"minItems": "1"})
-
- def test_enum_allows_empty_arrays(self):
- """
- Technically, all the spec says is they SHOULD have elements, not MUST.
-
- See https://github.com/Julian/jsonschema/issues/529.
- """
- self.Validator.check_schema({"enum": []})
-
- def test_enum_allows_non_unique_items(self):
- """
- Technically, all the spec says is they SHOULD be unique, not MUST.
-
- See https://github.com/Julian/jsonschema/issues/529.
- """
- self.Validator.check_schema({"enum": [12, 12]})
+class ValidatorTestMixin(object):
+ def setUp(self):
+ self.instance = mock.Mock()
+ self.schema = {}
+ self.resolver = mock.Mock()
+ self.validator = self.validator_class(self.schema)
-class ValidatorTestMixin(MetaSchemaTestsMixin, object):
def test_valid_instances_are_valid(self):
- schema, instance = self.valid
- self.assertTrue(self.Validator(schema).is_valid(instance))
+ errors = iter([])
+
+ with mock.patch.object(
+ self.validator, "iter_errors", return_value=errors,
+ ):
+ self.assertTrue(
+ self.validator.is_valid(self.instance, self.schema)
+ )
def test_invalid_instances_are_not_valid(self):
- schema, instance = self.invalid
- self.assertFalse(self.Validator(schema).is_valid(instance))
+ errors = iter([mock.Mock()])
+
+ with mock.patch.object(
+ self.validator, "iter_errors", return_value=errors,
+ ):
+ self.assertFalse(
+ self.validator.is_valid(self.instance, self.schema)
+ )
def test_non_existent_properties_are_ignored(self):
- self.Validator({object(): object()}).validate(instance=object())
+ instance, my_property, my_value = mock.Mock(), mock.Mock(), mock.Mock()
+ validate(instance=instance, schema={my_property: my_value})
def test_it_creates_a_ref_resolver_if_not_provided(self):
- self.assertIsInstance(
- self.Validator({}).resolver,
- validators.RefResolver,
- )
+ self.assertIsInstance(self.validator.resolver, RefResolver)
def test_it_delegates_to_a_ref_resolver(self):
- ref, schema = "someCoolRef", {"type": "integer"}
- resolver = validators.RefResolver("", {}, store={ref: schema})
- validator = self.Validator({"$ref": ref}, resolver=resolver)
+ resolver = RefResolver("", {})
+ schema = {"$ref": mock.Mock()}
+
+ with mock.patch.object(resolver, "resolve") as resolve:
+ resolve.return_value = "url", {"type": "integer"}
+ with self.assertRaises(ValidationError):
+ self.validator_class(schema, resolver=resolver).validate(None)
- with self.assertRaises(exceptions.ValidationError):
- validator.validate(None)
+ resolve.assert_called_once_with(schema["$ref"])
def test_it_delegates_to_a_legacy_ref_resolver(self):
"""
Legacy RefResolvers support only the context manager form of
resolution.
+
"""
class LegacyRefResolver(object):
@@ -1165,388 +674,116 @@ def resolving(this, ref):
resolver = LegacyRefResolver()
schema = {"$ref": "the ref"}
- with self.assertRaises(exceptions.ValidationError):
- self.Validator(schema, resolver=resolver).validate(None)
+ with self.assertRaises(ValidationError):
+ self.validator_class(schema, resolver=resolver).validate(None)
def test_is_type_is_true_for_valid_type(self):
- self.assertTrue(self.Validator({}).is_type("foo", "string"))
+ self.assertTrue(self.validator.is_type("foo", "string"))
def test_is_type_is_false_for_invalid_type(self):
- self.assertFalse(self.Validator({}).is_type("foo", "array"))
+ self.assertFalse(self.validator.is_type("foo", "array"))
def test_is_type_evades_bool_inheriting_from_int(self):
- self.assertFalse(self.Validator({}).is_type(True, "integer"))
- self.assertFalse(self.Validator({}).is_type(True, "number"))
+ self.assertFalse(self.validator.is_type(True, "integer"))
+ self.assertFalse(self.validator.is_type(True, "number"))
- @unittest.skipIf(PY3, "In Python 3 json.load always produces unicode")
- def test_string_a_bytestring_is_a_string(self):
- self.Validator({"type": "string"}).validate(b"foo")
+ def test_is_type_raises_exception_for_unknown_type(self):
+ with self.assertRaises(UnknownType):
+ self.validator.is_type("foo", object())
- def test_patterns_can_be_native_strings(self):
- """
- See https://github.com/Julian/jsonschema/issues/611.
- """
- self.Validator({"pattern": "foo"}).validate("foo")
-
- def test_it_can_validate_with_decimals(self):
- schema = {"items": {"type": "number"}}
- Validator = validators.extend(
- self.Validator,
- type_checker=self.Validator.TYPE_CHECKER.redefine(
- "number",
- lambda checker, thing: isinstance(
- thing, (int, float, Decimal),
- ) and not isinstance(thing, bool),
- )
- )
- validator = Validator(schema)
- validator.validate([1, 1.1, Decimal(1) / Decimal(8)])
+class TestDraft3Validator(ValidatorTestMixin, unittest.TestCase):
+ validator_class = Draft3Validator
- invalid = ["foo", {}, [], True, None]
- self.assertEqual(
- [error.instance for error in validator.iter_errors(invalid)],
- invalid,
- )
+ def test_is_type_is_true_for_any_type(self):
+ self.assertTrue(self.validator.is_valid(mock.Mock(), {"type": "any"}))
- def test_it_returns_true_for_formats_it_does_not_know_about(self):
- validator = self.Validator(
- {"format": "carrot"}, format_checker=FormatChecker(),
- )
- validator.validate("bugs")
-
- def test_it_does_not_validate_formats_by_default(self):
- validator = self.Validator({})
- self.assertIsNone(validator.format_checker)
-
- def test_it_validates_formats_if_a_checker_is_provided(self):
- checker = FormatChecker()
- bad = ValueError("Bad!")
-
- @checker.checks("foo", raises=ValueError)
- def check(value):
- if value == "good":
- return True
- elif value == "bad":
- raise bad
- else: # pragma: no cover
- self.fail("What is {}? [Baby Don't Hurt Me]".format(value))
-
- validator = self.Validator(
- {"format": "foo"}, format_checker=checker,
- )
+ def test_is_type_does_not_evade_bool_if_it_is_being_tested(self):
+ self.assertTrue(self.validator.is_type(True, "boolean"))
+ self.assertTrue(self.validator.is_valid(True, {"type": "any"}))
- validator.validate("good")
- with self.assertRaises(exceptions.ValidationError) as cm:
- validator.validate("bad")
-
- # Make sure original cause is attached
- self.assertIs(cm.exception.cause, bad)
-
- def test_non_string_custom_type(self):
- non_string_type = object()
- schema = {"type": [non_string_type]}
- Crazy = validators.extend(
- self.Validator,
- type_checker=self.Validator.TYPE_CHECKER.redefine(
- non_string_type,
- lambda checker, thing: isinstance(thing, int),
- )
- )
- Crazy(schema).validate(15)
+ def test_non_string_custom_types(self):
+ schema = {'type': [None]}
+ cls = self.validator_class(schema, types={None: type(None)})
+ cls.validate(None, schema)
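test_non_string_custom_types exercises the reinstated types= keyword: a mapping from JSON type names to Python types that is merged over DEFAULT_TYPES. A sketch treating tuples as arrays, the same trick the UniqueTupleItemsMixin later in this diff relies on:

    from jsonschema import Draft4Validator

    validator = Draft4Validator({"type": "array"}, types={"array": (tuple,)})
    validator.validate((1, 2, 3))         # tuples now count as arrays
    print(validator.is_valid([1, 2, 3]))  # False: list was overridden away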
- def test_it_properly_formats_tuples_in_errors(self):
- """
- A tuple instance properly formats validation errors for uniqueItems.
- See https://github.com/Julian/jsonschema/pull/224
- """
- TupleValidator = validators.extend(
- self.Validator,
- type_checker=self.Validator.TYPE_CHECKER.redefine(
- "array",
- lambda checker, thing: isinstance(thing, tuple),
- )
- )
- with self.assertRaises(exceptions.ValidationError) as e:
- TupleValidator({"uniqueItems": True}).validate((1, 1))
- self.assertIn("(1, 1) has non-unique elements", str(e.exception))
+class TestDraft4Validator(ValidatorTestMixin, unittest.TestCase):
+ validator_class = Draft4Validator
-class AntiDraft6LeakMixin(object):
+class TestBuiltinFormats(unittest.TestCase):
"""
- Make sure functionality from draft 6 doesn't leak backwards in time.
- """
-
- def test_True_is_not_a_schema(self):
- with self.assertRaises(exceptions.SchemaError) as e:
- self.Validator.check_schema(True)
- self.assertIn("True is not of type", str(e.exception))
-
- def test_False_is_not_a_schema(self):
- with self.assertRaises(exceptions.SchemaError) as e:
- self.Validator.check_schema(False)
- self.assertIn("False is not of type", str(e.exception))
-
- @unittest.skip(bug(523))
- def test_True_is_not_a_schema_even_if_you_forget_to_check(self):
- resolver = validators.RefResolver("", {})
- with self.assertRaises(Exception) as e:
- self.Validator(True, resolver=resolver).validate(12)
- self.assertNotIsInstance(e.exception, exceptions.ValidationError)
-
- @unittest.skip(bug(523))
- def test_False_is_not_a_schema_even_if_you_forget_to_check(self):
- resolver = validators.RefResolver("", {})
- with self.assertRaises(Exception) as e:
- self.Validator(False, resolver=resolver).validate(12)
- self.assertNotIsInstance(e.exception, exceptions.ValidationError)
-
-
-class TestDraft3Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase):
- Validator = validators.Draft3Validator
- valid = {}, {}
- invalid = {"type": "integer"}, "foo"
-
- def test_any_type_is_valid_for_type_any(self):
- validator = self.Validator({"type": "any"})
- validator.validate(object())
-
- def test_any_type_is_redefinable(self):
- """
- Sigh, because why not.
- """
- Crazy = validators.extend(
- self.Validator,
- type_checker=self.Validator.TYPE_CHECKER.redefine(
- "any", lambda checker, thing: isinstance(thing, int),
- )
- )
- validator = Crazy({"type": "any"})
- validator.validate(12)
- with self.assertRaises(exceptions.ValidationError):
- validator.validate("foo")
+ The built-in (specification-defined) formats do not raise type errors.
- def test_is_type_is_true_for_any_type(self):
- self.assertTrue(self.Validator({}).is_valid(object(), {"type": "any"}))
-
- def test_is_type_does_not_evade_bool_if_it_is_being_tested(self):
- self.assertTrue(self.Validator({}).is_type(True, "boolean"))
- self.assertTrue(self.Validator({}).is_valid(True, {"type": "any"}))
-
-
-class TestDraft4Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase):
- Validator = validators.Draft4Validator
- valid = {}, {}
- invalid = {"type": "integer"}, "foo"
+ If an instance or value is not a string, it should be ignored.
+ """
-class TestDraft6Validator(ValidatorTestMixin, TestCase):
- Validator = validators.Draft6Validator
- valid = {}, {}
- invalid = {"type": "integer"}, "foo"
+for format in FormatChecker.checkers:
+ def test(self, format=format):
+ v = Draft4Validator({"format": format}, format_checker=FormatChecker())
+ v.validate(123)
-class TestDraft7Validator(ValidatorTestMixin, TestCase):
- Validator = validators.Draft7Validator
- valid = {}, {}
- invalid = {"type": "integer"}, "foo"
+ name = "test_{0}_ignores_non_strings".format(format)
+ test.__name__ = name
+ setattr(TestBuiltinFormats, name, test)
+ del test # Ugh py.test. Stop discovering top level tests.
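The loop above stamps one generated test per registered format onto TestBuiltinFormats, using a default argument to freeze the loop variable and deleting the module-level name afterwards so test discovery doesn't pick it up twice. The same idiom in isolation (names here are illustrative):

    import unittest

    class TestGenerated(unittest.TestCase):
        pass

    for value in ("a", "b"):
        # Bind the loop variable via a default argument; otherwise every
        # generated test would close over the final value of `value`.
        def test(self, value=value):
            self.assertIsInstance(value, str)

        test.__name__ = name = "test_value_{0}".format(value)
        setattr(TestGenerated, name, test)
        del test  # keep the module namespace clean for discovery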
-class TestValidatorFor(SynchronousTestCase):
+class TestValidatorFor(unittest.TestCase):
def test_draft_3(self):
schema = {"$schema": "http://json-schema.org/draft-03/schema"}
- self.assertIs(
- validators.validator_for(schema),
- validators.Draft3Validator,
- )
+ self.assertIs(validator_for(schema), Draft3Validator)
schema = {"$schema": "http://json-schema.org/draft-03/schema#"}
- self.assertIs(
- validators.validator_for(schema),
- validators.Draft3Validator,
- )
+ self.assertIs(validator_for(schema), Draft3Validator)
def test_draft_4(self):
schema = {"$schema": "http://json-schema.org/draft-04/schema"}
- self.assertIs(
- validators.validator_for(schema),
- validators.Draft4Validator,
- )
+ self.assertIs(validator_for(schema), Draft4Validator)
schema = {"$schema": "http://json-schema.org/draft-04/schema#"}
- self.assertIs(
- validators.validator_for(schema),
- validators.Draft4Validator,
- )
-
- def test_draft_6(self):
- schema = {"$schema": "http://json-schema.org/draft-06/schema"}
- self.assertIs(
- validators.validator_for(schema),
- validators.Draft6Validator,
- )
-
- schema = {"$schema": "http://json-schema.org/draft-06/schema#"}
- self.assertIs(
- validators.validator_for(schema),
- validators.Draft6Validator,
- )
-
- def test_draft_7(self):
- schema = {"$schema": "http://json-schema.org/draft-07/schema"}
- self.assertIs(
- validators.validator_for(schema),
- validators.Draft7Validator,
- )
-
- schema = {"$schema": "http://json-schema.org/draft-07/schema#"}
- self.assertIs(
- validators.validator_for(schema),
- validators.Draft7Validator,
- )
-
- def test_True(self):
- self.assertIs(
- validators.validator_for(True),
- validators._LATEST_VERSION,
- )
-
- def test_False(self):
- self.assertIs(
- validators.validator_for(False),
- validators._LATEST_VERSION,
- )
+ self.assertIs(validator_for(schema), Draft4Validator)
def test_custom_validator(self):
- Validator = validators.create(
- meta_schema={"id": "meta schema id"},
- version="12",
- id_of=lambda s: s.get("id", ""),
- )
+ Validator = create(meta_schema={"id": "meta schema id"}, version="12")
schema = {"$schema": "meta schema id"}
- self.assertIs(
- validators.validator_for(schema),
- Validator,
- )
-
- def test_custom_validator_draft6(self):
- Validator = validators.create(
- meta_schema={"$id": "meta schema $id"},
- version="13",
- )
- schema = {"$schema": "meta schema $id"}
- self.assertIs(
- validators.validator_for(schema),
- Validator,
- )
+ self.assertIs(validator_for(schema), Validator)
def test_validator_for_jsonschema_default(self):
- self.assertIs(validators.validator_for({}), validators._LATEST_VERSION)
+ self.assertIs(validator_for({}), Draft4Validator)
def test_validator_for_custom_default(self):
- self.assertIs(validators.validator_for({}, default=None), None)
-
- def test_warns_if_meta_schema_specified_was_not_found(self):
- self.assertWarns(
- category=DeprecationWarning,
- message=(
- "The metaschema specified by $schema was not found. "
- "Using the latest draft to validate, but this will raise "
- "an error in the future."
- ),
- # https://tm.tl/9363 :'(
- filename=sys.modules[self.assertWarns.__module__].__file__,
-
- f=validators.validator_for,
- schema={u"$schema": "unknownSchema"},
- default={},
- )
-
- def test_does_not_warn_if_meta_schema_is_unspecified(self):
- validators.validator_for(schema={}, default={}),
- self.assertFalse(self.flushWarnings())
-
+ self.assertIs(validator_for({}, default=None), None)
-class TestValidate(SynchronousTestCase):
- def assertUses(self, schema, Validator):
- result = []
- self.patch(Validator, "check_schema", result.append)
- validators.validate({}, schema)
- self.assertEqual(result, [schema])
+class TestValidate(unittest.TestCase):
def test_draft3_validator_is_chosen(self):
- self.assertUses(
- schema={"$schema": "http://json-schema.org/draft-03/schema#"},
- Validator=validators.Draft3Validator,
- )
+ schema = {"$schema": "http://json-schema.org/draft-03/schema#"}
+ with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
+ validate({}, schema)
+ chk_schema.assert_called_once_with(schema)
# Make sure it works without the empty fragment
- self.assertUses(
- schema={"$schema": "http://json-schema.org/draft-03/schema"},
- Validator=validators.Draft3Validator,
- )
+ schema = {"$schema": "http://json-schema.org/draft-03/schema"}
+ with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
+ validate({}, schema)
+ chk_schema.assert_called_once_with(schema)
def test_draft4_validator_is_chosen(self):
- self.assertUses(
- schema={"$schema": "http://json-schema.org/draft-04/schema#"},
- Validator=validators.Draft4Validator,
- )
- # Make sure it works without the empty fragment
- self.assertUses(
- schema={"$schema": "http://json-schema.org/draft-04/schema"},
- Validator=validators.Draft4Validator,
- )
-
- def test_draft6_validator_is_chosen(self):
- self.assertUses(
- schema={"$schema": "http://json-schema.org/draft-06/schema#"},
- Validator=validators.Draft6Validator,
- )
- # Make sure it works without the empty fragment
- self.assertUses(
- schema={"$schema": "http://json-schema.org/draft-06/schema"},
- Validator=validators.Draft6Validator,
- )
-
- def test_draft7_validator_is_chosen(self):
- self.assertUses(
- schema={"$schema": "http://json-schema.org/draft-07/schema#"},
- Validator=validators.Draft7Validator,
- )
- # Make sure it works without the empty fragment
- self.assertUses(
- schema={"$schema": "http://json-schema.org/draft-07/schema"},
- Validator=validators.Draft7Validator,
- )
-
- def test_draft7_validator_is_the_default(self):
- self.assertUses(schema={}, Validator=validators.Draft7Validator)
-
- def test_validation_error_message(self):
- with self.assertRaises(exceptions.ValidationError) as e:
- validators.validate(12, {"type": "string"})
- self.assertRegexpMatches(
- str(e.exception),
- "(?s)Failed validating u?'.*' in schema.*On instance",
- )
-
- def test_schema_error_message(self):
- with self.assertRaises(exceptions.SchemaError) as e:
- validators.validate(12, {"type": 12})
- self.assertRegexpMatches(
- str(e.exception),
- "(?s)Failed validating u?'.*' in metaschema.*On schema",
- )
+ schema = {"$schema": "http://json-schema.org/draft-04/schema#"}
+ with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
+ validate({}, schema)
+ chk_schema.assert_called_once_with(schema)
- def test_it_uses_best_match(self):
- # This is a schema that best_match will recurse into
- schema = {"oneOf": [{"type": "string"}, {"type": "array"}]}
- with self.assertRaises(exceptions.ValidationError) as e:
- validators.validate(12, schema)
- self.assertIn("12 is not of type", str(e.exception))
+ def test_draft4_validator_is_the_default(self):
+ with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
+ validate({}, {})
+ chk_schema.assert_called_once_with({})
-class TestRefResolver(SynchronousTestCase):
+class TestRefResolver(unittest.TestCase):
base_uri = ""
stored_uri = "foo://stored"
@@ -1555,20 +792,14 @@ class TestRefResolver(SynchronousTestCase):
def setUp(self):
self.referrer = {}
self.store = {self.stored_uri: self.stored_schema}
- self.resolver = validators.RefResolver(
- self.base_uri, self.referrer, self.store,
- )
+ self.resolver = RefResolver(self.base_uri, self.referrer, self.store)
def test_it_does_not_retrieve_schema_urls_from_the_network(self):
- ref = validators.Draft3Validator.META_SCHEMA["id"]
- self.patch(
- self.resolver,
- "resolve_remote",
- lambda *args, **kwargs: self.fail("Should not have been called!"),
- )
- with self.resolver.resolving(ref) as resolved:
- pass
- self.assertEqual(resolved, validators.Draft3Validator.META_SCHEMA)
+ ref = Draft3Validator.META_SCHEMA["id"]
+ with mock.patch.object(self.resolver, "resolve_remote") as remote:
+ with self.resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, Draft3Validator.META_SCHEMA)
+ self.assertFalse(remote.called)
def test_it_resolves_local_refs(self):
ref = "#/properties/foo"
@@ -1578,10 +809,7 @@ def test_it_resolves_local_refs(self):
def test_it_resolves_local_refs_with_id(self):
schema = {"id": "http://bar/schema#", "a": {"foo": "bar"}}
- resolver = validators.RefResolver.from_schema(
- schema,
- id_of=lambda schema: schema.get(u"id", u""),
- )
+ resolver = RefResolver.from_schema(schema)
with resolver.resolving("#/a") as resolved:
self.assertEqual(resolved, schema["a"])
with resolver.resolving("http://bar/schema#/a") as resolved:
@@ -1599,52 +827,27 @@ def test_it_retrieves_unstored_refs_via_requests(self):
ref = "http://bar#baz"
schema = {"baz": 12}
- if "requests" in sys.modules:
- self.addCleanup(
- sys.modules.__setitem__, "requests", sys.modules["requests"],
- )
- sys.modules["requests"] = ReallyFakeRequests({"http://bar": schema})
-
- with self.resolver.resolving(ref) as resolved:
- self.assertEqual(resolved, 12)
+ with mock.patch("jsonschema.validators.requests") as requests:
+ requests.get.return_value.json.return_value = schema
+ with self.resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, 12)
+ requests.get.assert_called_once_with("http://bar")
def test_it_retrieves_unstored_refs_via_urlopen(self):
ref = "http://bar#baz"
schema = {"baz": 12}
- if "requests" in sys.modules:
- self.addCleanup(
- sys.modules.__setitem__, "requests", sys.modules["requests"],
- )
- sys.modules["requests"] = None
-
- @contextmanager
- def fake_urlopen(url):
- self.assertEqual(url, "http://bar")
- yield BytesIO(json.dumps(schema).encode("utf8"))
-
- self.addCleanup(setattr, validators, "urlopen", validators.urlopen)
- validators.urlopen = fake_urlopen
-
- with self.resolver.resolving(ref) as resolved:
- pass
- self.assertEqual(resolved, 12)
-
- def test_it_retrieves_local_refs_via_urlopen(self):
- with tempfile.NamedTemporaryFile(delete=False, mode="wt") as tempf:
- self.addCleanup(os.remove, tempf.name)
- json.dump({"foo": "bar"}, tempf)
-
- ref = "file://{}#foo".format(pathname2url(tempf.name))
- with self.resolver.resolving(ref) as resolved:
- self.assertEqual(resolved, "bar")
+ with mock.patch("jsonschema.validators.requests", None):
+ with mock.patch("jsonschema.validators.urlopen") as urlopen:
+ urlopen.return_value.read.return_value = (
+ json.dumps(schema).encode("utf8"))
+ with self.resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, 12)
+ urlopen.assert_called_once_with("http://bar")
def test_it_can_construct_a_base_uri_from_a_schema(self):
schema = {"id": "foo"}
- resolver = validators.RefResolver.from_schema(
- schema,
- id_of=lambda schema: schema.get(u"id", u""),
- )
+ resolver = RefResolver.from_schema(schema)
self.assertEqual(resolver.base_uri, "foo")
self.assertEqual(resolver.resolution_scope, "foo")
with resolver.resolving("") as resolved:
@@ -1658,7 +861,7 @@ def test_it_can_construct_a_base_uri_from_a_schema(self):
def test_it_can_construct_a_base_uri_from_a_schema_without_id(self):
schema = {}
- resolver = validators.RefResolver.from_schema(schema)
+ resolver = RefResolver.from_schema(schema)
self.assertEqual(resolver.base_uri, "")
self.assertEqual(resolver.resolution_scope, "")
with resolver.resolving("") as resolved:
@@ -1667,96 +870,83 @@ def test_it_can_construct_a_base_uri_from_a_schema_without_id(self):
self.assertEqual(resolved, schema)
def test_custom_uri_scheme_handlers(self):
- def handler(url):
- self.assertEqual(url, ref)
- return schema
-
schema = {"foo": "bar"}
ref = "foo://bar"
- resolver = validators.RefResolver("", {}, handlers={"foo": handler})
+ foo_handler = mock.Mock(return_value=schema)
+ resolver = RefResolver("", {}, handlers={"foo": foo_handler})
with resolver.resolving(ref) as resolved:
self.assertEqual(resolved, schema)
+ foo_handler.assert_called_once_with(ref)
def test_cache_remote_on(self):
- response = [object()]
-
- def handler(url):
- try:
- return response.pop()
- except IndexError: # pragma: no cover
- self.fail("Response must not have been cached!")
-
ref = "foo://bar"
- resolver = validators.RefResolver(
- "", {}, cache_remote=True, handlers={"foo": handler},
+ foo_handler = mock.Mock()
+ resolver = RefResolver(
+ "", {}, cache_remote=True, handlers={"foo": foo_handler},
)
with resolver.resolving(ref):
pass
with resolver.resolving(ref):
pass
+ foo_handler.assert_called_once_with(ref)
def test_cache_remote_off(self):
- response = [object()]
-
- def handler(url):
- try:
- return response.pop()
- except IndexError: # pragma: no cover
- self.fail("Handler called twice!")
-
ref = "foo://bar"
- resolver = validators.RefResolver(
- "", {}, cache_remote=False, handlers={"foo": handler},
+ foo_handler = mock.Mock()
+ resolver = RefResolver(
+ "", {}, cache_remote=False, handlers={"foo": foo_handler},
)
with resolver.resolving(ref):
pass
+ self.assertEqual(foo_handler.call_count, 1)
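The two caching tests pin down the handler contract: with cache_remote=True a second resolution of the same reference never reaches the handler. (The cache-off test resolves only once; repeated resolutions of an identical URL can also be served by the lru_cache-backed remote_cache layer, so resolving twice would not prove anything.) A sketch with a hypothetical foo:// handler:

    from jsonschema import RefResolver

    calls = []

    def handler(uri):
        # Stand-in for a real handler that would fetch and return a schema.
        calls.append(uri)
        return {"type": "string"}

    resolver = RefResolver("", {}, cache_remote=True, handlers={"foo": handler})
    with resolver.resolving("foo://bar"):
        pass
    with resolver.resolving("foo://bar"):
        pass
    print(len(calls))  # 1 -- the second resolution is served from the cache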
def test_if_you_give_it_junk_you_get_a_resolution_error(self):
- error = ValueError("Oh no! What's this?")
-
- def handler(url):
- raise error
-
ref = "foo://bar"
- resolver = validators.RefResolver("", {}, handlers={"foo": handler})
- with self.assertRaises(exceptions.RefResolutionError) as err:
+ foo_handler = mock.Mock(side_effect=ValueError("Oh no! What's this?"))
+ resolver = RefResolver("", {}, handlers={"foo": foo_handler})
+ with self.assertRaises(RefResolutionError) as err:
with resolver.resolving(ref):
- self.fail("Shouldn't get this far!") # pragma: no cover
- self.assertEqual(err.exception, exceptions.RefResolutionError(error))
+ pass
+ self.assertEqual(str(err.exception), "Oh no! What's this?")
def test_helpful_error_message_on_failed_pop_scope(self):
- resolver = validators.RefResolver("", {})
+ resolver = RefResolver("", {})
resolver.pop_scope()
- with self.assertRaises(exceptions.RefResolutionError) as exc:
+ with self.assertRaises(RefResolutionError) as exc:
resolver.pop_scope()
self.assertIn("Failed to pop the scope", str(exc.exception))
-def sorted_errors(errors):
- def key(error):
- return (
- [str(e) for e in error.path],
- [str(e) for e in error.schema_path],
- )
- return sorted(errors, key=key)
+class UniqueTupleItemsMixin(object):
+ """
+ A tuple instance properly formats validation errors for uniqueItems.
+ See https://github.com/Julian/jsonschema/pull/224
-@attr.s
-class ReallyFakeRequests(object):
+ """
- _responses = attr.ib()
+ def test_it_properly_formats_an_error_message(self):
+ validator = self.validator_class(
+ schema={"uniqueItems": True},
+ types={"array": (tuple,)},
+ )
+ with self.assertRaises(ValidationError) as e:
+ validator.validate((1, 1))
+ self.assertIn("(1, 1) has non-unique elements", str(e.exception))
- def get(self, url):
- response = self._responses.get(url)
- if url is None: # pragma: no cover
- raise ValueError("Unknown URL: " + repr(url))
- return _ReallyFakeJSONResponse(json.dumps(response))
+class TestDraft4UniqueTupleItems(UniqueTupleItemsMixin, unittest.TestCase):
+ validator_class = Draft4Validator
-@attr.s
-class _ReallyFakeJSONResponse(object):
- _response = attr.ib()
+class TestDraft3UniqueTupleItems(UniqueTupleItemsMixin, unittest.TestCase):
+ validator_class = Draft3Validator
- def json(self):
- return json.loads(self._response)
+
+def sorted_errors(errors):
+ def key(error):
+ return (
+ [str(e) for e in error.path],
+ [str(e) for e in error.schema_path],
+ )
+ return sorted(errors, key=key)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/validators.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/validators.py
old mode 100644
new mode 100755
index 1dc420c7..ee1ec94b
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/validators.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonschema/validators.py
@@ -1,107 +1,29 @@
-"""
-Creation and extension of validators, with implementations for existing drafts.
-"""
from __future__ import division
-from warnings import warn
import contextlib
import json
import numbers
-from six import add_metaclass
+try:
+ import requests
+except ImportError:
+ requests = None
-from jsonschema import (
- _legacy_validators,
- _types,
- _utils,
- _validators,
- exceptions,
-)
+from jsonschema import _utils, _validators
from jsonschema.compat import (
- Sequence,
- int_types,
- iteritems,
- lru_cache,
- str_types,
- unquote,
- urldefrag,
- urljoin,
- urlopen,
- urlsplit,
+ Sequence, urljoin, urlsplit, urldefrag, unquote, urlopen,
+ str_types, int_types, iteritems, lru_cache,
)
+from jsonschema.exceptions import ErrorTree # Backwards compat # noqa: F401
+from jsonschema.exceptions import RefResolutionError, SchemaError, UnknownType
-# Sigh. https://gitlab.com/pycqa/flake8/issues/280
-# https://github.com/pyga/ebb-lint/issues/7
-# Imported for backwards compatibility.
-from jsonschema.exceptions import ErrorTree
-ErrorTree
-
-
-class _DontDoThat(Exception):
- """
- Raised when a Validators with non-default type checker is misused.
-
- Asking one for DEFAULT_TYPES doesn't make sense, since type checkers
- exist for the unrepresentable cases where DEFAULT_TYPES can't
- represent the type relationship.
- """
-
- def __str__(self):
- return "DEFAULT_TYPES cannot be used on Validators using TypeCheckers"
+_unset = _utils.Unset()
validators = {}
meta_schemas = _utils.URIDict()
-def _generate_legacy_type_checks(types=()):
- """
- Generate newer-style type checks out of JSON-type-name-to-type mappings.
-
- Arguments:
-
- types (dict):
-
- A mapping of type names to their Python types
-
- Returns:
-
- A dictionary of definitions to pass to `TypeChecker`
- """
- types = dict(types)
-
- def gen_type_check(pytypes):
- pytypes = _utils.flatten(pytypes)
-
- def type_check(checker, instance):
- if isinstance(instance, bool):
- if bool not in pytypes:
- return False
- return isinstance(instance, pytypes)
-
- return type_check
-
- definitions = {}
- for typename, pytypes in iteritems(types):
- definitions[typename] = gen_type_check(pytypes)
-
- return definitions
-
-
-_DEPRECATED_DEFAULT_TYPES = {
- u"array": list,
- u"boolean": bool,
- u"integer": int_types,
- u"null": type(None),
- u"number": numbers.Number,
- u"object": dict,
- u"string": str_types,
-}
-_TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES = _types.TypeChecker(
- type_checkers=_generate_legacy_type_checks(_DEPRECATED_DEFAULT_TYPES),
-)
-
-
def validates(version):
"""
Register the decorated validator for a ``version`` of the specification.
@@ -117,172 +39,39 @@ def validates(version):
Returns:
- collections.Callable:
+ callable: a class decorator to decorate the validator with the version
- a class decorator to decorate the validator with the version
"""
def _validates(cls):
validators[version] = cls
- meta_schema_id = cls.ID_OF(cls.META_SCHEMA)
- if meta_schema_id:
- meta_schemas[meta_schema_id] = cls
+ if u"id" in cls.META_SCHEMA:
+ meta_schemas[cls.META_SCHEMA[u"id"]] = cls
return cls
return _validates
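In this downgraded form, validates() registers the class under its version string and, when the meta-schema carries a top-level u"id", under that id as well, which is what validator_for() later dispatches on. A sketch mirroring the test_custom_validator case in the tests above (the meta-schema id is made up):

    from jsonschema import validators

    MyValidator = validators.create(
        meta_schema={"id": "mem://my-meta-schema"},
        validators={},
        version="my-draft",  # create() runs validates("my-draft") internally
    )

    schema = {"$schema": "mem://my-meta-schema"}
    print(validators.validator_for(schema) is MyValidator)  # True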
-def _DEFAULT_TYPES(self):
- if self._CREATED_WITH_DEFAULT_TYPES is None:
- raise _DontDoThat()
-
- warn(
- (
- "The DEFAULT_TYPES attribute is deprecated. "
- "See the type checker attached to this validator instead."
- ),
- DeprecationWarning,
- stacklevel=2,
- )
- return self._DEFAULT_TYPES
-
-
-class _DefaultTypesDeprecatingMetaClass(type):
- DEFAULT_TYPES = property(_DEFAULT_TYPES)
-
-
-def _id_of(schema):
- if schema is True or schema is False:
- return u""
- return schema.get(u"$id", u"")
-
-
-def create(
- meta_schema,
- validators=(),
- version=None,
- default_types=None,
- type_checker=None,
- id_of=_id_of,
-):
- """
- Create a new validator class.
-
- Arguments:
-
- meta_schema (collections.Mapping):
-
- the meta schema for the new validator class
-
- validators (collections.Mapping):
-
- a mapping from names to callables, where each callable will
- validate the schema property with the given name.
-
- Each callable should take 4 arguments:
-
- 1. a validator instance,
- 2. the value of the property being validated within the
- instance
- 3. the instance
- 4. the schema
-
- version (str):
-
- an identifier for the version that this validator class will
- validate. If provided, the returned validator class will
- have its ``__name__`` set to include the version, and also
- will have `jsonschema.validators.validates` automatically
- called for the given version.
-
- type_checker (jsonschema.TypeChecker):
-
- a type checker, used when applying the :validator:`type` validator.
-
- If unprovided, a `jsonschema.TypeChecker` will be created
- with a set of default types typical of JSON Schema drafts.
-
- default_types (collections.Mapping):
-
- .. deprecated:: 3.0.0
-
- Please use the type_checker argument instead.
-
- If set, it provides mappings of JSON types to Python types
- that will be converted to functions and redefined in this
- object's `jsonschema.TypeChecker`.
-
- id_of (collections.Callable):
-
- A function that given a schema, returns its ID.
-
- Returns:
-
- a new `jsonschema.IValidator` class
- """
+def create(meta_schema, validators=(), version=None, default_types=None): # noqa: C901, E501
+ if default_types is None:
+ default_types = {
+ u"array": list, u"boolean": bool, u"integer": int_types,
+ u"null": type(None), u"number": numbers.Number, u"object": dict,
+ u"string": str_types,
+ }
- if default_types is not None:
- if type_checker is not None:
- raise TypeError(
- "Do not specify default_types when providing a type checker.",
- )
- _created_with_default_types = True
- warn(
- (
- "The default_types argument is deprecated. "
- "Use the type_checker argument instead."
- ),
- DeprecationWarning,
- stacklevel=2,
- )
- type_checker = _types.TypeChecker(
- type_checkers=_generate_legacy_type_checks(default_types),
- )
- else:
- default_types = _DEPRECATED_DEFAULT_TYPES
- if type_checker is None:
- _created_with_default_types = False
- type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES
- elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES:
- _created_with_default_types = False
- else:
- _created_with_default_types = None
-
- @add_metaclass(_DefaultTypesDeprecatingMetaClass)
class Validator(object):
-
VALIDATORS = dict(validators)
META_SCHEMA = dict(meta_schema)
- TYPE_CHECKER = type_checker
- ID_OF = staticmethod(id_of)
-
- DEFAULT_TYPES = property(_DEFAULT_TYPES)
- _DEFAULT_TYPES = dict(default_types)
- _CREATED_WITH_DEFAULT_TYPES = _created_with_default_types
+ DEFAULT_TYPES = dict(default_types)
def __init__(
- self,
- schema,
- types=(),
- resolver=None,
- format_checker=None,
+ self, schema, types=(), resolver=None, format_checker=None,
):
- if types:
- warn(
- (
- "The types argument is deprecated. Provide "
- "a type_checker to jsonschema.validators.extend "
- "instead."
- ),
- DeprecationWarning,
- stacklevel=2,
- )
-
- self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many(
- _generate_legacy_type_checks(types),
- )
+ self._types = dict(self.DEFAULT_TYPES)
+ self._types.update(types)
if resolver is None:
- resolver = RefResolver.from_schema(schema, id_of=id_of)
+ resolver = RefResolver.from_schema(schema)
self.resolver = resolver
self.format_checker = format_checker
@@ -291,25 +80,13 @@ def __init__(
@classmethod
def check_schema(cls, schema):
for error in cls(cls.META_SCHEMA).iter_errors(schema):
- raise exceptions.SchemaError.create_from(error)
+ raise SchemaError.create_from(error)
def iter_errors(self, instance, _schema=None):
if _schema is None:
_schema = self.schema
- if _schema is True:
- return
- elif _schema is False:
- yield exceptions.ValidationError(
- "False schema does not allow %r" % (instance,),
- validator=None,
- validator_value=None,
- instance=instance,
- schema=_schema,
- )
- return
-
- scope = id_of(_schema)
+ scope = _schema.get(u"id")
if scope:
self.resolver.push_scope(scope)
try:
@@ -353,10 +130,19 @@ def validate(self, *args, **kwargs):
raise error
def is_type(self, instance, type):
- try:
- return self.TYPE_CHECKER.is_type(instance, type)
- except exceptions.UndefinedTypeCheck:
- raise exceptions.UnknownType(type, instance, self.schema)
+ if type not in self._types:
+ raise UnknownType(type, instance, self.schema)
+ pytypes = self._types[type]
+
+ # bool inherits from int, so ensure bools aren't reported as ints
+ if isinstance(instance, bool):
+ pytypes = _utils.flatten(pytypes)
+ is_number = any(
+ issubclass(pytype, numbers.Number) for pytype in pytypes
+ )
+ if is_number and bool not in pytypes:
+ return False
+ return isinstance(instance, pytypes)
def is_valid(self, instance, _schema=None):
error = next(self.iter_errors(instance, _schema), None)
@@ -369,76 +155,14 @@ def is_valid(self, instance, _schema=None):
return Validator
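The class returned by create() reinstates the DEFAULT_TYPES/types mechanism, and its hand-rolled is_type() restores the bool guard: since bool subclasses int, True only counts as a JSON integer or number if bool was listed explicitly among the Python types. A behaviour sketch against Draft4Validator as defined below:

    from jsonschema import Draft4Validator

    v = Draft4Validator({})
    print(v.is_type(5, "integer"))     # True
    print(v.is_type(True, "integer"))  # False: the bool is evaded
    print(v.is_type(True, "boolean"))  # True

    try:
        v.is_type(5, "decimal")        # not a known JSON type name
    except Exception as exc:
        print(type(exc).__name__)      # UnknownType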
-def extend(validator, validators=(), version=None, type_checker=None):
- """
- Create a new validator class by extending an existing one.
-
- Arguments:
-
- validator (jsonschema.IValidator):
-
- an existing validator class
-
- validators (collections.Mapping):
-
- a mapping of new validator callables to extend with, whose
- structure is as in `create`.
-
- .. note::
-
- Any validator callables with the same name as an
- existing one will (silently) replace the old validator
- callable entirely, effectively overriding any validation
- done in the "parent" validator class.
-
- If you wish to instead extend the behavior of a parent's
- validator callable, delegate and call it directly in
- the new validator function by retrieving it using
- ``OldValidator.VALIDATORS["validator_name"]``.
-
- version (str):
-
- a version for the new validator class
-
- type_checker (jsonschema.TypeChecker):
-
- a type checker, used when applying the :validator:`type` validator.
-
- If unprovided, the type checker of the extended
- `jsonschema.IValidator` will be carried along.`
-
- Returns:
-
- a new `jsonschema.IValidator` class extending the one provided
-
- .. note:: Meta Schemas
-
- The new validator class will have its parent's meta schema.
-
- If you wish to change or extend the meta schema in the new
- validator class, modify ``META_SCHEMA`` directly on the returned
- class. Note that no implicit copying is done, so a copy should
- likely be made before modifying it, in order to not affect the
- old validator.
- """
-
+def extend(validator, validators, version=None):
all_validators = dict(validator.VALIDATORS)
all_validators.update(validators)
-
- if type_checker is None:
- type_checker = validator.TYPE_CHECKER
- elif validator._CREATED_WITH_DEFAULT_TYPES:
- raise TypeError(
- "Cannot extend a validator created with default_types "
- "with a type_checker. Update the validator to use a "
- "type_checker when created."
- )
return create(
meta_schema=validator.META_SCHEMA,
validators=all_validators,
version=version,
- type_checker=type_checker,
- id_of=validator.ID_OF,
+ default_types=validator.DEFAULT_TYPES,
)
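extend() is now just a VALIDATORS merge plus a fresh create() call that carries DEFAULT_TYPES along (the 3.x type_checker plumbing is gone). Adding a hypothetical keyword looks like this; validator callables take (validator, schema value, instance, schema) and yield errors:

    from jsonschema import Draft4Validator, ValidationError
    from jsonschema.validators import extend

    def even(validator, value, instance, schema):
        if value and isinstance(instance, int) and instance % 2:
            yield ValidationError("%r is not even" % (instance,))

    EvenValidator = extend(Draft4Validator, {"even": even})
    EvenValidator({"even": True}).validate(2)         # passes silently
    print(EvenValidator({"even": True}).is_valid(3))  # False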
@@ -448,146 +172,62 @@ def extend(validator, validators=(), version=None, type_checker=None):
u"$ref": _validators.ref,
u"additionalItems": _validators.additionalItems,
u"additionalProperties": _validators.additionalProperties,
- u"dependencies": _legacy_validators.dependencies_draft3,
- u"disallow": _legacy_validators.disallow_draft3,
- u"divisibleBy": _validators.multipleOf,
- u"enum": _validators.enum,
- u"extends": _legacy_validators.extends_draft3,
- u"format": _validators.format,
- u"items": _legacy_validators.items_draft3_draft4,
- u"maxItems": _validators.maxItems,
- u"maxLength": _validators.maxLength,
- u"maximum": _legacy_validators.maximum_draft3_draft4,
- u"minItems": _validators.minItems,
- u"minLength": _validators.minLength,
- u"minimum": _legacy_validators.minimum_draft3_draft4,
- u"pattern": _validators.pattern,
- u"patternProperties": _validators.patternProperties,
- u"properties": _legacy_validators.properties_draft3,
- u"type": _legacy_validators.type_draft3,
- u"uniqueItems": _validators.uniqueItems,
- },
- type_checker=_types.draft3_type_checker,
- version="draft3",
- id_of=lambda schema: schema.get(u"id", ""),
-)
-
-Draft4Validator = create(
- meta_schema=_utils.load_schema("draft4"),
- validators={
- u"$ref": _validators.ref,
- u"additionalItems": _validators.additionalItems,
- u"additionalProperties": _validators.additionalProperties,
- u"allOf": _validators.allOf,
- u"anyOf": _validators.anyOf,
- u"dependencies": _validators.dependencies,
- u"enum": _validators.enum,
- u"format": _validators.format,
- u"items": _legacy_validators.items_draft3_draft4,
- u"maxItems": _validators.maxItems,
- u"maxLength": _validators.maxLength,
- u"maxProperties": _validators.maxProperties,
- u"maximum": _legacy_validators.maximum_draft3_draft4,
- u"minItems": _validators.minItems,
- u"minLength": _validators.minLength,
- u"minProperties": _validators.minProperties,
- u"minimum": _legacy_validators.minimum_draft3_draft4,
- u"multipleOf": _validators.multipleOf,
- u"not": _validators.not_,
- u"oneOf": _validators.oneOf,
- u"pattern": _validators.pattern,
- u"patternProperties": _validators.patternProperties,
- u"properties": _validators.properties,
- u"required": _validators.required,
- u"type": _validators.type,
- u"uniqueItems": _validators.uniqueItems,
- },
- type_checker=_types.draft4_type_checker,
- version="draft4",
- id_of=lambda schema: schema.get(u"id", ""),
-)
-
-Draft6Validator = create(
- meta_schema=_utils.load_schema("draft6"),
- validators={
- u"$ref": _validators.ref,
- u"additionalItems": _validators.additionalItems,
- u"additionalProperties": _validators.additionalProperties,
- u"allOf": _validators.allOf,
- u"anyOf": _validators.anyOf,
- u"const": _validators.const,
- u"contains": _validators.contains,
u"dependencies": _validators.dependencies,
+ u"disallow": _validators.disallow_draft3,
+ u"divisibleBy": _validators.multipleOf,
u"enum": _validators.enum,
- u"exclusiveMaximum": _validators.exclusiveMaximum,
- u"exclusiveMinimum": _validators.exclusiveMinimum,
+ u"extends": _validators.extends_draft3,
u"format": _validators.format,
u"items": _validators.items,
u"maxItems": _validators.maxItems,
u"maxLength": _validators.maxLength,
- u"maxProperties": _validators.maxProperties,
u"maximum": _validators.maximum,
u"minItems": _validators.minItems,
u"minLength": _validators.minLength,
- u"minProperties": _validators.minProperties,
u"minimum": _validators.minimum,
u"multipleOf": _validators.multipleOf,
- u"not": _validators.not_,
- u"oneOf": _validators.oneOf,
u"pattern": _validators.pattern,
u"patternProperties": _validators.patternProperties,
- u"properties": _validators.properties,
- u"propertyNames": _validators.propertyNames,
- u"required": _validators.required,
- u"type": _validators.type,
+ u"properties": _validators.properties_draft3,
+ u"type": _validators.type_draft3,
u"uniqueItems": _validators.uniqueItems,
},
- type_checker=_types.draft6_type_checker,
- version="draft6",
+ version="draft3",
)
-Draft7Validator = create(
- meta_schema=_utils.load_schema("draft7"),
+Draft4Validator = create(
+ meta_schema=_utils.load_schema("draft4"),
validators={
u"$ref": _validators.ref,
u"additionalItems": _validators.additionalItems,
u"additionalProperties": _validators.additionalProperties,
- u"allOf": _validators.allOf,
- u"anyOf": _validators.anyOf,
- u"const": _validators.const,
- u"contains": _validators.contains,
+ u"allOf": _validators.allOf_draft4,
+ u"anyOf": _validators.anyOf_draft4,
u"dependencies": _validators.dependencies,
u"enum": _validators.enum,
- u"exclusiveMaximum": _validators.exclusiveMaximum,
- u"exclusiveMinimum": _validators.exclusiveMinimum,
u"format": _validators.format,
- u"if": _validators.if_,
u"items": _validators.items,
u"maxItems": _validators.maxItems,
u"maxLength": _validators.maxLength,
- u"maxProperties": _validators.maxProperties,
+ u"maxProperties": _validators.maxProperties_draft4,
u"maximum": _validators.maximum,
u"minItems": _validators.minItems,
u"minLength": _validators.minLength,
- u"minProperties": _validators.minProperties,
+ u"minProperties": _validators.minProperties_draft4,
u"minimum": _validators.minimum,
u"multipleOf": _validators.multipleOf,
- u"oneOf": _validators.oneOf,
- u"not": _validators.not_,
+ u"not": _validators.not_draft4,
+ u"oneOf": _validators.oneOf_draft4,
u"pattern": _validators.pattern,
u"patternProperties": _validators.patternProperties,
- u"properties": _validators.properties,
- u"propertyNames": _validators.propertyNames,
- u"required": _validators.required,
- u"type": _validators.type,
+ u"properties": _validators.properties_draft4,
+ u"required": _validators.required_draft4,
+ u"type": _validators.type_draft4,
u"uniqueItems": _validators.uniqueItems,
},
- type_checker=_types.draft7_type_checker,
- version="draft7",
+ version="draft4",
)
-_LATEST_VERSION = Draft7Validator
-
class RefResolver(object):
"""
@@ -616,21 +256,16 @@ class RefResolver(object):
A mapping from URI schemes to functions that should be used
to retrieve them
- urljoin_cache (:func:`functools.lru_cache`):
+ urljoin_cache (functools.lru_cache):
A cache that will be used for caching the results of joining
the resolution scope to subscopes.
- remote_cache (:func:`functools.lru_cache`):
+ remote_cache (functools.lru_cache):
A cache that will be used for caching the results of
resolved remote URLs.
- Attributes:
-
- cache_remote (bool):
-
- Whether remote refs should be cached after first resolution
"""
def __init__(
@@ -664,7 +299,7 @@ def __init__(
self._remote_cache = remote_cache
@classmethod
- def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
+ def from_schema(cls, schema, *args, **kwargs):
"""
Construct a resolver from a JSON schema object.
@@ -676,36 +311,22 @@ def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
Returns:
- `RefResolver`
+ :class:`RefResolver`
+
"""
- return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs)
+ return cls(schema.get(u"id", u""), schema, *args, **kwargs)
def push_scope(self, scope):
- """
- Enter a given sub-scope.
-
- Treats further dereferences as being performed underneath the
- given scope.
- """
self._scopes_stack.append(
self._urljoin_cache(self.resolution_scope, scope),
)
def pop_scope(self):
- """
- Exit the most recent entered scope.
-
- Treats further dereferences as being performed underneath the
- original scope.
-
- Don't call this method more times than `push_scope` has been
- called.
- """
try:
self._scopes_stack.pop()
except IndexError:
- raise exceptions.RefResolutionError(
+ raise RefResolutionError(
"Failed to pop the scope from an empty stack. "
"`pop_scope()` should only be called once for every "
"`push_scope()`"
@@ -713,24 +334,15 @@ def pop_scope(self):
@property
def resolution_scope(self):
- """
- Retrieve the current resolution scope.
- """
return self._scopes_stack[-1]
@property
def base_uri(self):
- """
- Retrieve the current base URI, not including any fragment.
- """
uri, _ = urldefrag(self.resolution_scope)
return uri
@contextlib.contextmanager
def in_scope(self, scope):
- """
- Temporarily enter the given scope for the duration of the context.
- """
self.push_scope(scope)
try:
yield
@@ -740,15 +352,15 @@ def in_scope(self, scope):
@contextlib.contextmanager
def resolving(self, ref):
"""
- Resolve the given ``ref`` and enter its resolution scope.
-
- Exits the scope on exit of this context manager.
+ Context manager which resolves a JSON ``ref`` and enters the
+ resolution scope of this ref.
Arguments:
ref (str):
The reference to resolve
+
"""
url, resolved = self.resolve(ref)
@@ -759,16 +371,10 @@ def resolving(self, ref):
self.pop_scope()
def resolve(self, ref):
- """
- Resolve the given reference.
- """
url = self._urljoin_cache(self.resolution_scope, ref)
return url, self._remote_cache(url)
def resolve_from_url(self, url):
- """
- Resolve the given remote URL.
- """
url, fragment = urldefrag(url)
try:
document = self.store[url]
@@ -776,7 +382,7 @@ def resolve_from_url(self, url):
try:
document = self.resolve_remote(url)
except Exception as exc:
- raise exceptions.RefResolutionError(exc)
+ raise RefResolutionError(exc)
return self.resolve_fragment(document, fragment)
@@ -788,11 +394,12 @@ def resolve_fragment(self, document, fragment):
document:
- The referent document
+ The referrant document
fragment (str):
a URI fragment to resolve within it
+
"""
fragment = fragment.lstrip(u"/")
@@ -810,7 +417,7 @@ def resolve_fragment(self, document, fragment):
try:
document = document[part]
except (TypeError, LookupError):
- raise exceptions.RefResolutionError(
+ raise RefResolutionError(
"Unresolvable JSON pointer: %r" % fragment
)
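resolve_fragment() walks the JSON pointer one reference token at a time (numeric tokens double as list indexes) and turns lookup failures into the RefResolutionError above. In practice:

    from jsonschema import RefResolver

    document = {"foo": [{"bar": 12}]}
    resolver = RefResolver("", document)

    with resolver.resolving("#/foo/0/bar") as resolved:
        print(resolved)  # 12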
@@ -843,31 +450,40 @@ def resolve_remote(self, uri):
The retrieved document
- .. _requests: https://pypi.org/project/requests/
+ .. _requests: http://pypi.python.org/pypi/requests/
+
"""
- try:
- import requests
- except ImportError:
- requests = None
scheme = urlsplit(uri).scheme
if scheme in self.handlers:
result = self.handlers[scheme](uri)
- elif scheme in [u"http", u"https"] and requests:
+ elif (
+ scheme in [u"http", u"https"] and
+ requests and
+ getattr(requests.Response, "json", None) is not None
+ ):
# Requests has support for detecting the correct encoding of
# json over http
- result = requests.get(uri).json()
+ if callable(requests.Response.json):
+ result = requests.get(uri).json()
+ else:
+ result = requests.get(uri).json
else:
# Otherwise, pass off to urllib and assume utf-8
- with urlopen(uri) as url:
- result = json.loads(url.read().decode("utf-8"))
+ result = json.loads(urlopen(uri).read().decode("utf-8"))
if self.cache_remote:
self.store[uri] = result
return result
+def validator_for(schema, default=_unset):
+ if default is _unset:
+ default = Draft4Validator
+ return meta_schemas.get(schema.get(u"$schema", u""), default)
+
+
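validator_for() in this form silently falls back to Draft4Validator when $schema is absent or unknown, where the 3.x code deleted above warned and used the latest draft. For example:

    from jsonschema.validators import (
        Draft3Validator, Draft4Validator, validator_for,
    )

    draft3 = {"$schema": "http://json-schema.org/draft-03/schema#"}
    print(validator_for(draft3) is Draft3Validator)  # True
    print(validator_for({}) is Draft4Validator)      # True: hard-wired default
    print(validator_for({}, default=None))           # None: explicit default wins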
def validate(instance, schema, cls=None, *args, **kwargs):
"""
Validate an instance under the given schema.
@@ -877,14 +493,12 @@ def validate(instance, schema, cls=None, *args, **kwargs):
...
ValidationError: [2, 3, 4] is too long
- :func:`validate` will first verify that the provided schema is
- itself valid, since not doing so can lead to less obvious error
- messages and fail in less obvious or consistent ways.
-
- If you know you have a valid schema already, especially if you
- intend to validate multiple instances with the same schema, you
- likely would prefer using the `IValidator.validate` method directly
- on a specific validator (e.g. ``Draft7Validator.validate``).
+ :func:`validate` will first verify that the provided schema is itself
+ valid, since not doing so can lead to less obvious error messages and fail
+ in less obvious or consistent ways. If you know you have a valid schema
+ already or don't care, you might prefer using the
+ :meth:`~IValidator.validate` method directly on a specific validator
+ (e.g. :meth:`Draft4Validator.validate`).
Arguments:
@@ -897,74 +511,31 @@ def validate(instance, schema, cls=None, *args, **kwargs):
The schema to validate with
- cls (IValidator):
+ cls (:class:`IValidator`):
The class that will be used to validate the instance.
- If the ``cls`` argument is not provided, two things will happen
- in accordance with the specification. First, if the schema has a
- :validator:`$schema` property containing a known meta-schema [#]_
- then the proper validator will be used. The specification recommends
- that all schemas contain :validator:`$schema` properties for this
- reason. If no :validator:`$schema` property is found, the default
- validator class is the latest released draft.
+ If the ``cls`` argument is not provided, two things will happen in
+ accordance with the specification. First, if the schema has a
+ :validator:`$schema` property containing a known meta-schema [#]_ then the
+ proper validator will be used. The specification recommends that all
+ schemas contain :validator:`$schema` properties for this reason. If no
+ :validator:`$schema` property is found, the default validator class is
+ :class:`Draft4Validator`.
- Any other provided positional and keyword arguments will be passed
- on when instantiating the ``cls``.
+ Any other provided positional and keyword arguments will be passed on when
+ instantiating the ``cls``.
Raises:
- `jsonschema.exceptions.ValidationError` if the instance
- is invalid
+ :exc:`ValidationError` if the instance is invalid
- `jsonschema.exceptions.SchemaError` if the schema itself
- is invalid
+ :exc:`SchemaError` if the schema itself is invalid
.. rubric:: Footnotes
- .. [#] known by a validator registered with
- `jsonschema.validators.validates`
+ .. [#] known by a validator registered with :func:`validates`
"""
if cls is None:
cls = validator_for(schema)
-
cls.check_schema(schema)
- validator = cls(schema, *args, **kwargs)
- error = exceptions.best_match(validator.iter_errors(instance))
- if error is not None:
- raise error
-
-
-def validator_for(schema, default=_LATEST_VERSION):
- """
- Retrieve the validator class appropriate for validating the given schema.
-
- Uses the :validator:`$schema` property that should be present in the
- given schema to look up the appropriate validator class.
-
- Arguments:
-
- schema (collections.Mapping or bool):
-
- the schema to look at
-
- default:
-
- the default to return if the appropriate validator class
- cannot be determined.
-
- If unprovided, the default is to return the latest supported
- draft.
- """
- if schema is True or schema is False or u"$schema" not in schema:
- return default
- if schema[u"$schema"] not in meta_schemas:
- warn(
- (
- "The metaschema specified by $schema was not found. "
- "Using the latest draft to validate, but this will raise "
- "an error in the future."
- ),
- DeprecationWarning,
- stacklevel=2,
- )
- return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION)
+ cls(schema, *args, **kwargs).validate(instance)
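End to end, the rewritten validate() does three things: pick the class, check the schema against its own meta-schema, then validate the instance, raising the first error encountered (3.x's best_match selection is gone). Typical use:

    from jsonschema import SchemaError, ValidationError, validate

    validate({"name": "x"}, {"type": "object", "required": ["name"]})  # passes

    try:
        validate(12, {"type": "string"})
    except ValidationError as error:
        print(error.message)   # 12 is not of type 'string'

    try:
        validate(12, {"type": 12})  # the schema itself is malformed
    except SchemaError:
        print("invalid schema")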
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/__init__.py
deleted file mode 100644
index 03d88ff4..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""
- jsonspec
- ~~~~~~~~
-
-"""
-
-from ._version import get_versions
-__version__ = get_versions()['version']
-del get_versions
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/__main__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/__main__.py
deleted file mode 100644
index 71b440f4..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/__main__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .cli import main
-
-if __name__ == '__main__':
- main()
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/_version.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/_version.py
deleted file mode 100644
index e2bb2b38..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/_version.py
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# This file was generated by 'versioneer.py' (0.15) from
-# revision-control system data, or from the parent directory name of an
-# unpacked source archive. Distribution tarballs contain a pre-generated copy
-# of this file.
-
-import json
-import sys
-
-version_json = '''
-{
- "dirty": false,
- "error": null,
- "full-revisionid": "f91981724cea0c366bd42a6670eb07bbe31c0e0c",
- "version": "0.10.1"
-}
-''' # END VERSION_JSON
-
-
-def get_versions():
- return json.loads(version_json)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/cli.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/cli.py
deleted file mode 100644
index 3bf692f0..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/cli.py
+++ /dev/null
@@ -1,545 +0,0 @@
-"""
- jsonspec.cli
- ~~~~~~~~~~~~
-
-"""
-
-
-from __future__ import print_function
-
-import argparse
-import logging
-import os
-import stat
-import sys
-import pkg_resources
-from functools import wraps
-from jsonspec import driver
-from textwrap import dedent
-
-try:
- from termcolor import colored
-except ImportError:
-
- def colored(string, *args, **kwargs):
- return string
-
-
-def disable_logging(func):
- return func
- """
- Temporary disable logging.
- """
- handler = logging.NullHandler()
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- logger = logging.getLogger()
- logger.addHandler(handler)
- resp = func(*args, **kwargs)
- logger.removeHandler(handler)
- return resp
- return wrapper
-
-
-def format_output(func):
- return func
- """
- Format output.
- """
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- try:
- response = func(*args, **kwargs)
- except Exception as error:
- print(colored(error, 'red'), file=sys.stderr)
- sys.exit(1)
- else:
- print(response)
- sys.exit(0)
- return wrapper
-
-
-def JSONStruct(string):
- return driver.loads(string)
-
-
-class JSONFile(argparse.FileType):
- def __call__(self, string):
- file = super(JSONFile, self).__call__(string)
- return driver.load(file)
-
-
-def document_arguments(parser):
- group = parser.add_mutually_exclusive_group()
- group.add_argument('--document-json',
- type=JSONStruct,
- help='json structure',
- dest='document_json',
- metavar='')
- group.add_argument('--document-file',
- type=JSONFile('r'),
- help='json filename',
- dest='document_file',
- metavar='')
-
-
-def schema_arguments(parser):
- group = parser.add_mutually_exclusive_group()
- group.add_argument('--schema-json',
- type=JSONStruct,
- help='json structure',
- dest='schema_json',
- metavar='')
- group.add_argument('--schema-file',
- type=JSONFile('r'),
- help='json filename',
- dest='schema_file',
- metavar='')
-
-
-def fragment_arguments(parser):
- group = parser.add_mutually_exclusive_group()
- group.add_argument('--fragment-json',
- type=JSONStruct,
- help='json structure',
- dest='fragment_json',
- metavar='')
- group.add_argument('--fragment-file',
- type=JSONFile('r'),
- help='json filename',
- dest='fragment_file',
- metavar='')
-
-
-def indentation_arguments(parser):
- parser.add_argument('--indent',
- type=int,
- help='return an indented json',
- metavar='')
-
-
-def pointer_arguments(parser):
- parser.add_argument('pointer',
- type=str,
- help='json pointer',
- metavar='')
-
-
-def target_arguments(parser):
- parser.add_argument('-t', '--target-pointer',
- help='target pointer',
- dest='target',
- metavar='')
-
-
-def parse_document(args):
- document = None
- if args.document_json:
- document = args.document_json
- elif args.document_file:
- document = args.document_file
- else:
- mode = os.fstat(0).st_mode
- if stat.S_ISFIFO(mode):
- # cat doc.json | cmd
- document = driver.load(sys.stdin)
- elif stat.S_ISREG(mode):
- # cmd < doc.json
- document = driver.load(sys.stdin)
-
- setattr(args, 'document', document)
- return args
-
-
-def parse_fragment(args):
- setattr(args, 'fragment', args.fragment_json or args.fragment_file)
- return args
-
-
-def parse_pointer(args):
- target = args.pointer
- if target.startswith('#'):
- target = target[1:]
- setattr(args, 'pointer', target)
-
-
-def parse_schema(args):
- setattr(args, 'schema', args.schema_json or args.schema_file)
- return args
-
-
-def parse_target(args):
- target = args.target
- if target.startswith('#'):
- target = target[1:]
- setattr(args, 'target', target)
- if not target:
- raise ValueError('target is required')
-
-
-class Command(object):
- help = None
- description = None
- epilog = None
-
- def __init__(self, parser=None):
- self.parser = parser or argparse.ArgumentParser(
- description=self.description, epilog=self.epilog)
- self.parser.set_defaults(func=self)
- self.arguments(self.parser)
-
- def arguments(self, parser):
- return
-
- def run(self, args=None):
- raise NotImplementedError
-
- def parse_args(self, args):
- return self.parser.parse_args(args)
-
- @disable_logging
- @format_output
- def __call__(self, args=None):
- return self.run(args)
-
-
-class AddCommand(Command):
- """Add a fragment to a json document.
-
- examples::
-
- %(prog)s '#/foo/1' --fragment-file=fragment.json --document-json='{"foo": ["bar", "baz"]}'
- echo '{"foo": ["bar", "baz"]}' | %(prog)s '#/foo/1' --fragment-file=fragment.json
- %(prog)s '#/foo/1' --fragment-file=fragment.json --document-file=doc.json
- %(prog)s '#/foo/1' --fragment-file=fragment.json < doc.json
- """
-
- help = 'add fragment to a document'
-
- def arguments(self, parser):
- pointer_arguments(parser)
- document_arguments(parser)
- fragment_arguments(parser)
- indentation_arguments(parser)
-
- def run(self, args):
- parse_pointer(args)
- parse_document(args)
- parse_fragment(args)
-
- from jsonspec.operations import add, Error
- from jsonspec.pointer import ParseError
-
- try:
- response = add(args.document, args.pointer, args.fragment)
- return driver.dumps(response, indent=args.indent)
- except Error as error:
- raise Exception(error)
- except ParseError as error:
- raise Exception('{} is not a valid pointer'.format(args.pointer))
-
-
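For reference, the AddCommand deleted above was a thin argparse front-end over jsonspec.operations.add, which applies an RFC 6902-style add at a JSON-pointer location and returns the modified document. Roughly (output shown as expected, not verified against this vendored copy):

    from jsonspec.operations import add

    document = {"foo": ["bar", "baz"]}
    result = add(document, "/foo/1", "quux")
    print(result)  # expected: {'foo': ['bar', 'quux', 'baz']}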
-class CheckCommand(Command):
- """Tests that a value at the target location is equal to a specified value.
-
- examples::
-
- %(prog)s '#/foo/1' --fragment-file=fragment.json --document-json='{"foo": ["bar", "baz"]}'
- echo '{"foo": ["bar", "baz"]}' | %(prog)s '#/foo/1' --fragment-file=fragment.json
- %(prog)s '#/foo/1' --fragment-file=fragment.json --document-file=doc.json
- %(prog)s '#/foo/1' --fragment-file=fragment.json < doc.json
- """
-
- help = 'check member of a document'
-
- def arguments(self, parser):
- pointer_arguments(parser)
- document_arguments(parser)
- fragment_arguments(parser)
-
- def run(self, args):
- parse_pointer(args)
- parse_document(args)
- parse_fragment(args)
-
- from jsonspec.operations import check, Error
- from jsonspec.pointer import ParseError
-
- try:
- if check(args.document, args.pointer, args.fragment):
- return 'It validates'
- else:
- raise Exception('It does not validate')
- except Error as error:
- raise Exception('It does not validate')
- except ParseError as error:
- raise Exception('{} is not a valid pointer'.format(args.pointer))
-
-
-class CopyCommand(Command):
- """Copies the value at a specified location to the target location.
-
- examples::
-
- %(prog)s '#/foo/1' --target='#/foo/2' --document-json='{"foo": ["bar", "baz"]}'
- echo '{"foo": ["bar", "baz"]}' | %(prog)s '#/foo/1' --target='#/foo/2'
- %(prog)s '#/foo/1' --target='#/foo/2' --document-file=doc.json
- %(prog)s '#/foo/1' --target='#/foo/2' < doc.json
- """
-
- help = 'copy a member of a document'
-
- def arguments(self, parser):
- pointer_arguments(parser)
- document_arguments(parser)
- target_arguments(parser)
- indentation_arguments(parser)
-
- def run(self, args):
- parse_pointer(args)
- parse_document(args)
- parse_target(args)
-
- from jsonspec.operations import copy, Error
- from jsonspec.pointer import ParseError
-
- try:
- response = copy(args.document, args.target, args.pointer)
- return driver.dumps(response, indent=args.indent)
- except Error as error:
- raise Exception(error)
- except ParseError as error:
- raise Exception('{} is not a valid pointer'.format(args.pointer))
-
-
-class ExtractCommand(Command):
- """Extract a fragment from a json document.
-
- examples::
-
- %(prog)s '#/foo/1' --document-json='{"foo": ["bar", "baz"]}'
- echo '{"foo": ["bar", "baz"]}' | %(prog)s '#/foo/1'
- %(prog)s '#/foo/1' --document-file=doc.json
- %(prog)s '#/foo/1' < doc.json
- """
-
- help = 'extract a member of a document'
-
- def arguments(self, parser):
- pointer_arguments(parser)
- document_arguments(parser)
- indentation_arguments(parser)
-
- def run(self, args):
- parse_pointer(args)
- parse_document(args)
-
- from jsonspec.pointer import extract
- from jsonspec.pointer import ExtractError, ParseError
-
- try:
- response = extract(args.document, args.pointer)
- return driver.dumps(response, indent=args.indent)
- except ExtractError:
- raise Exception(args)
- raise Exception('{} does not match'.format(args.pointer))
- except ParseError:
- raise Exception('{} is not a valid pointer'.format(args.pointer))
-
-
-class MoveCommand(Command):
- """Removes the value at a specified location and adds it to the target location.
-
- examples::
-
- %(prog)s '#/foo/2' --target='#/foo/1' --document-json='{"foo": ["bar", "baz"]}'
- echo '{"foo": ["bar", "baz"]}' | %(prog)s '#/foo/2' --target='#/foo/1'
- %(prog)s '#/foo/2' --target='#/foo/1' --document-file=doc.json
- %(prog)s '#/foo/2' --target='#/foo/1' < doc.json
- """
-
- help = 'move a member of a document'
-
- def arguments(self, parser):
- pointer_arguments(parser)
- document_arguments(parser)
- target_arguments(parser)
- indentation_arguments(parser)
-
- def run(self, args):
- parse_pointer(args)
- parse_target(args)
- parse_document(args)
-
- from jsonspec.operations import move, Error
- from jsonspec.pointer import ParseError
-
- try:
- response = move(args.document, args.target, args.pointer)
- return driver.dumps(response, indent=args.indent)
- except Error as error:
- raise Exception(error)
- except ParseError as error:
- raise Exception('{} is not a valid pointer'.format(args.pointer))
-
-
-class RemoveCommand(Command):
- """Replace the value of pointer.
-
- examples:
- %(prog)s '#/foo/1' --document-json='{"foo": ["bar", "baz"]}'
- echo '{"foo": ["bar", "baz"]}' | %(prog)s '#/foo/1'
- %(prog)s '#/foo/1' --document-file=doc.json
- %(prog)s '#/foo/1' < doc.json
-
- """
-
- help = 'remove a member of a document'
-
- def arguments(self, parser):
- pointer_arguments(parser)
- document_arguments(parser)
- indentation_arguments(parser)
-
- def run(self, args):
- parse_pointer(args)
- parse_document(args)
-
- from jsonspec.operations import remove, Error
- from jsonspec.pointer import ParseError
-
- try:
- response = remove(args.document, args.pointer)
- return driver.dumps(response, indent=args.indent)
- except Error:
- raise Exception('{} does not match'.format(args.pointer))
- except ParseError:
- raise Exception('{} is not a valid pointer'.format(args.pointer))
-
-
-class ReplaceCommand(Command):
- """Replace a fragment to a json document.
-
- examples::
-
- %(prog)s '#/foo/1' --fragment-file=fragment.json --document-json='{"foo": ["bar", "baz"]}'
- echo '{"foo": ["bar", "baz"]}' | %(prog)s '#/foo/1' --fragment-file=fragment.json
- %(prog)s '#/foo/1' --fragment-file=fragment.json --document-file=doc.json
- %(prog)s '#/foo/1' --fragment-file=fragment.json < doc.json
- """
-
- help = 'replace a member of a document'
-
- def arguments(self, parser):
- pointer_arguments(parser)
- document_arguments(parser)
- fragment_arguments(parser)
- indentation_arguments(parser)
-
- def run(self, args):
- parse_pointer(args)
- parse_document(args)
- parse_fragment(args)
-
- from jsonspec.operations import replace, Error
- from jsonspec.pointer import ParseError
-
- try:
- response = replace(args.document, args.pointer, args.fragment)
- return driver.dumps(response, indent=args.indent)
- except Error as error:
- raise Exception(error)
- except ParseError as error:
- raise Exception('{} is not a valid pointer'.format(args.pointer))
-
-
-class ValidateCommand(Command):
- """Validate document against a schema.
-
- examples::
-
- %(prog)s --schema-file=schema.json --document-json='{"foo": ["bar", "baz"]}'
- echo '{"foo": ["bar", "baz"]}' | %(prog)s --schema-file=schema.json
- %(prog)s --schema-file=schema.json --document-file=doc.json
- %(prog)s --schema-file=schema.json < doc.json
- """
-
- help = 'validate a document against a schema'
-
- def arguments(self, parser):
- document_arguments(parser)
- schema_arguments(parser)
- indentation_arguments(parser)
-
- def run(self, args):
- parse_document(args)
- parse_schema(args)
-
- from jsonspec.validators import load
- from jsonspec.validators import ValidationError
-
- try:
- validated = load(args.schema).validate(args.document)
- return driver.dumps(validated, indent=args.indent)
- except ValidationError as error:
- msg = 'document does not validate with schema.\n\n'
- for pointer, reasons in error.flatten().items():
- msg += ' {}\n'.format(pointer)
- for reason in reasons:
- msg += ' - reason {}\n'.format(reason)
- msg += '\n'
- raise Exception(msg)
-
-
-def get_parser():
- parser = argparse.ArgumentParser(
- formatter_class=argparse.RawDescriptionHelpFormatter)
-
- parser.add_argument('--version',
- action='version',
- version='%(prog)s 0.9.11')
-
- subparsers = parser.add_subparsers(help='choose one of these actions',
- dest='action',
- metavar='')
- subparsers.required = True
- cmds = []
- for entrypoint in pkg_resources.iter_entry_points('jsonspec.cli.commands'):
- logging.debug('loaded %s from %s', entrypoint, entrypoint.dist)
- cmds.append((entrypoint.name, entrypoint.load()))
-
- for name, command_class in sorted(cmds):
- description, help, epilog = None, None, None
- if command_class.__doc__:
- description, _, epilog = command_class.__doc__.lstrip().partition('\n\n') # noqa
-
- if description:
- description = description.replace('\n', ' ')
-
- if epilog:
-            epilog = dedent(epilog).replace('    ', '  ').replace('::\n\n', ':\n')  # noqa
- description = command_class.description or description
- epilog = command_class.epilog or epilog
- help = command_class.help or description
- subparser = subparsers.add_parser(name,
- help=help,
- description=description,
- epilog=epilog,
- formatter_class=argparse.RawDescriptionHelpFormatter) # noqa
-
- command = command_class(subparser) # noqa
- return parser
-
-
-def main():
- logging.basicConfig()
-
- parser = get_parser()
- args = parser.parse_args()
- args.func(args)
-
-
-if __name__ == '__main__':
- main()
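
An aside on wiring (not part of the deleted file): get_parser() above
discovers its subcommands through the 'jsonspec.cli.commands' entry-point
group, so the same group can be enumerated directly; a minimal sketch::

    import pkg_resources

    # List the subcommands get_parser() would register; each entry point
    # maps a command name to a Command subclass such as ExtractCommand.
    for ep in pkg_resources.iter_entry_points('jsonspec.cli.commands'):
        print(ep.name, '->', ep.load())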
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/driver.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/driver.py
deleted file mode 100644
index 1263aea4..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/driver.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
- jsonspec.driver
- ~~~~~~~~~~~~~~~
-
-"""
-
-
-try:
- from simplejson import * # noqa
-except ImportError:
- from json import * # noqa
-
-from functools import wraps
-
-__all__ = ['load', 'loads', 'dump', 'dumps']
-
-_load = load
-_loads = loads
-
-
-@wraps(_load)
-def load(fp, *args, **kwargs):
- return _load(fp, *args, **kwargs)
-load.original = _load
-
-
-@wraps(_loads)
-def loads(s, *args, **kwargs):
- return _loads(s, *args, **kwargs)
-loads.original = _loads
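
The deleted driver module is a thin facade over simplejson (or the stdlib
json when simplejson is absent), so it is used exactly like json itself;
a minimal sketch, assuming the package is importable::

    from jsonspec import driver

    doc = driver.loads('{"foo": ["bar", "baz"]}')  # parse a JSON string
    assert doc == {'foo': ['bar', 'baz']}
    print(driver.dumps(doc, indent=2))             # pretty-print it back
    assert driver.loads.original is not None       # unwrapped backend func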
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/card.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/card.json
deleted file mode 100644
index 9e84cb06..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/card.json
+++ /dev/null
@@ -1,48 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-03/schema#",
- "description": "A representation of a person, company, organization, or place",
- "type": "object",
- "properties": {
- "fn": {
- "description": "Formatted Name",
- "type": "string"
- },
- "familyName": { "type": "string", "required": true },
- "givenName": { "type": "string", "required": true },
- "additionalName": { "type": "array", "items": { "type": "string" } },
- "honorificPrefix": { "type": "array", "items": { "type": "string" } },
- "honorificSuffix": { "type": "array", "items": { "type": "string" } },
- "nickname": { "type": "string" },
- "url": { "type": "string", "format": "uri" },
- "email": {
- "type": "object",
- "properties": {
- "type": { "type": "string" },
- "value": { "type": "string", "format": "email" }
- }
- },
- "tel": {
- "type": "object",
- "properties": {
- "type": { "type": "string" },
- "value": { "type": "string", "format": "phone" }
- }
- },
- "adr": { "$ref": "http://json-schema.org/address" },
- "geo": { "$ref": "http://json-schema.org/geo" },
- "tz": { "type": "string" },
- "photo": { "type": "string" },
- "logo": { "type": "string" },
- "sound": { "type": "string" },
- "bday": { "type": "string", "format": "date" },
- "title": { "type": "string" },
- "role": { "type": "string" },
- "org": {
- "type": "object",
- "properties": {
- "organizationName": { "type": "string" },
- "organizationUnit": { "type": "string" }
- }
- }
- }
-}
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/hyper-schema.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/hyper-schema.json
deleted file mode 100644
index 0661eab5..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/hyper-schema.json
+++ /dev/null
@@ -1,50 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-03/hyper-schema#",
- "extends": {"$ref": "http://json-schema.org/draft-03/schema#"},
- "id": "http://json-schema.org/draft-03/hyper-schema#",
-
- "properties": {
- "links": {
- "type": "array",
- "items": {"$ref": "http://json-schema.org/draft-03/links#"}
- },
- "fragmentResolution": {
- "type": "string",
- "default": "slash-delimited"
- },
- "root": {
- "type": "boolean",
- "default": false
- },
- "readonly": {
- "type": "boolean",
- "default": false
- },
- "contentEncoding": {
- "type": "string"
- },
- "pathStart": {
- "type": "string",
- "format": "uri"
- },
- "mediaType": {
- "type": "string",
- "format": "media-type"
- }
- },
- "links": [
- {
- "href": "{id}",
- "rel": "self"
- },
- {
- "href": "{$ref}",
- "rel": "full"
- },
- {
- "href": "{$schema}",
- "rel": "describedby"
- }
- ],
- "fragmentResolution": "slash-delimited"
-}
\ No newline at end of file
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/json-ref.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/json-ref.json
deleted file mode 100644
index f25cbabd..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/json-ref.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-03/hyper-schema#",
- "id": "http://json-schema.org/draft-03/json-ref#",
- "additionalItems": {"$ref": "#"},
- "additionalProperties": {"$ref": "#"},
- "links": [
- {
- "href": "{id}",
- "rel": "self"
- },
- {
- "href": "{$ref}",
- "rel": "full"
- },
- {
- "href": "{$schema}",
- "rel": "describedby"
- }
- ],
- "fragmentResolution": "dot-delimited"
-}
\ No newline at end of file
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/links.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/links.json
deleted file mode 100644
index f25cbabd..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/links.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-03/hyper-schema#",
- "id": "http://json-schema.org/draft-03/json-ref#",
- "additionalItems": {"$ref": "#"},
- "additionalProperties": {"$ref": "#"},
- "links": [
- {
- "href": "{id}",
- "rel": "self"
- },
- {
- "href": "{$ref}",
- "rel": "full"
- },
- {
- "href": "{$schema}",
- "rel": "describedby"
- }
- ],
- "fragmentResolution": "dot-delimited"
-}
\ No newline at end of file
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/schema.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/schema.json
deleted file mode 100644
index 7e15b09c..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-03/schema.json
+++ /dev/null
@@ -1,142 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-03/schema#",
- "id": "http://json-schema.org/draft-03/schema#",
- "type": "object",
- "properties": {
- "type": {
- "type": ["string", "array"],
- "items": {
- "type": ["string", {"$ref": "#"}]
- },
- "uniqueItems": true,
- "default": "any"
- },
- "properties": {
- "type": "object",
- "additionalProperties": {"$ref": "#"},
- "default": {}
- },
- "patternProperties": {
- "type": "object",
- "additionalProperties": {"$ref": "#"},
- "default": {}
- },
- "additionalProperties": {
- "type": [{"$ref": "#"}, "boolean"],
- "default": {}
- },
- "items": {
- "type": [{"$ref": "#"}, "array"],
- "items": {"$ref": "#"},
- "default": {}
- },
- "additionalItems": {
- "type": [{"$ref": "#"}, "boolean"],
- "default": {}
- },
- "required": {
- "type": "boolean",
- "default": false
- },
- "dependencies": {
- "type": "object",
- "additionalProperties": {
- "type": ["string", "array", {"$ref": "#"}],
- "items": {
- "type": "string"
- }
- },
- "default": {}
- },
- "minimum": {
- "type": "number"
- },
- "maximum": {
- "type": "number"
- },
- "exclusiveMinimum": {
- "type": "boolean",
- "default": false
- },
- "exclusiveMaximum": {
- "type": "boolean",
- "default": false
- },
- "minItems": {
- "type": "integer",
- "minimum": 0,
- "default": 0
- },
- "maxItems": {
- "type": "integer",
- "minimum": 0
- },
- "uniqueItems": {
- "type": "boolean",
- "default": false
- },
- "pattern": {
- "type": "string",
- "format": "regex"
- },
- "minLength": {
- "type": "integer",
- "minimum": 0,
- "default": 0
- },
- "maxLength": {
- "type": "integer"
- },
- "enum": {
- "type": "array",
- "minItems": 1,
- "uniqueItems": true
- },
- "default": {
- "type": "any"
- },
- "title": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "format": {
- "type": "string"
- },
- "divisibleBy": {
- "type": "number",
- "minimum": 0,
- "exclusiveMinimum": true
- },
- "disallow": {
- "type": ["string", "array"],
- "items": {
- "type": ["string", {"$ref": "#"}]
- },
- "uniqueItems": true
- },
- "extends": {
- "type": [{"$ref": "#"}, "array"],
- "items": {"$ref": "#"},
- "default": {}
- },
- "id": {
- "type": "string",
- "format": "uri"
- },
- "$ref": {
- "type": "string",
- "format": "uri"
- },
- "$schema": {
- "type": "string",
- "format": "uri"
- }
- },
- "dependencies": {
- "exclusiveMinimum": "minimum",
- "exclusiveMaximum": "maximum"
- },
- "default": {}
-}
\ No newline at end of file
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-04/hyper-schema.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-04/hyper-schema.json
deleted file mode 100644
index 853bd35f..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-04/hyper-schema.json
+++ /dev/null
@@ -1,168 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-04/hyper-schema#",
- "id": "http://json-schema.org/draft-04/hyper-schema#",
- "title": "JSON Hyper-Schema",
- "allOf": [
- {
- "$ref": "http://json-schema.org/draft-04/schema#"
- }
- ],
- "properties": {
- "additionalItems": {
- "anyOf": [
- {
- "type": "boolean"
- },
- {
- "$ref": "#"
- }
- ]
- },
- "additionalProperties": {
- "anyOf": [
- {
- "type": "boolean"
- },
- {
- "$ref": "#"
- }
- ]
- },
- "dependencies": {
- "additionalProperties": {
- "anyOf": [
- {
- "$ref": "#"
- },
- {
- "type": "array"
- }
- ]
- }
- },
- "items": {
- "anyOf": [
- {
- "$ref": "#"
- },
- {
- "$ref": "#/definitions/schemaArray"
- }
- ]
- },
- "definitions": {
- "additionalProperties": {
- "$ref": "#"
- }
- },
- "patternProperties": {
- "additionalProperties": {
- "$ref": "#"
- }
- },
- "properties": {
- "additionalProperties": {
- "$ref": "#"
- }
- },
- "allOf": {
- "$ref": "#/definitions/schemaArray"
- },
- "anyOf": {
- "$ref": "#/definitions/schemaArray"
- },
- "oneOf": {
- "$ref": "#/definitions/schemaArray"
- },
- "not": {
- "$ref": "#"
- },
-
- "links": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/linkDescription"
- }
- },
- "fragmentResolution": {
- "type": "string"
- },
- "media": {
- "type": "object",
- "properties": {
- "type": {
- "description": "A media type, as described in RFC 2046",
- "type": "string"
- },
- "binaryEncoding": {
- "description": "A content encoding scheme, as described in RFC 2045",
- "type": "string"
- }
- }
- },
- "pathStart": {
- "description": "Instances' URIs must start with this value for this schema to apply to them",
- "type": "string",
- "format": "uri"
- }
- },
- "definitions": {
- "schemaArray": {
- "type": "array",
- "items": {
- "$ref": "#"
- }
- },
- "linkDescription": {
- "title": "Link Description Object",
- "type": "object",
- "required": [ "href", "rel" ],
- "properties": {
- "href": {
- "description": "a URI template, as defined by RFC 6570, with the addition of the $, ( and ) characters for pre-processing",
- "type": "string"
- },
- "rel": {
- "description": "relation to the target resource of the link",
- "type": "string"
- },
- "title": {
- "description": "a title for the link",
- "type": "string"
- },
- "targetSchema": {
- "description": "JSON Schema describing the link target",
- "$ref": "#"
- },
- "mediaType": {
- "description": "media type (as defined by RFC 2046) describing the link target",
- "type": "string"
- },
- "method": {
- "description": "method for requesting the target of the link (e.g. for HTTP this might be \"GET\" or \"DELETE\")",
- "type": "string"
- },
- "encType": {
- "description": "The media type in which to submit data along with the request",
- "type": "string",
- "default": "application/json"
- },
- "schema": {
- "description": "Schema describing the data to submit along with the request",
- "$ref": "#"
- }
- }
- }
- },
- "links": [
- {
- "rel": "self",
- "href": "{+id}"
- },
- {
- "rel": "full",
- "href": "{+($ref)}"
- }
- ]
-}
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-04/schema.json b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-04/schema.json
deleted file mode 100644
index 85eb502a..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/misc/schemas/draft-04/schema.json
+++ /dev/null
@@ -1,150 +0,0 @@
-{
- "id": "http://json-schema.org/draft-04/schema#",
- "$schema": "http://json-schema.org/draft-04/schema#",
- "description": "Core schema meta-schema",
- "definitions": {
- "schemaArray": {
- "type": "array",
- "minItems": 1,
- "items": { "$ref": "#" }
- },
- "positiveInteger": {
- "type": "integer",
- "minimum": 0
- },
- "positiveIntegerDefault0": {
- "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
- },
- "simpleTypes": {
- "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
- },
- "stringArray": {
- "type": "array",
- "items": { "type": "string" },
- "minItems": 1,
- "uniqueItems": true
- }
- },
- "type": "object",
- "properties": {
- "id": {
- "type": "string",
- "format": "uri"
- },
- "$schema": {
- "type": "string",
- "format": "uri"
- },
- "title": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "default": {},
- "multipleOf": {
- "type": "number",
- "minimum": 0,
- "exclusiveMinimum": true
- },
- "maximum": {
- "type": "number"
- },
- "exclusiveMaximum": {
- "type": "boolean",
- "default": false
- },
- "minimum": {
- "type": "number"
- },
- "exclusiveMinimum": {
- "type": "boolean",
- "default": false
- },
- "maxLength": { "$ref": "#/definitions/positiveInteger" },
- "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
- "pattern": {
- "type": "string",
- "format": "regex"
- },
- "additionalItems": {
- "anyOf": [
- { "type": "boolean" },
- { "$ref": "#" }
- ],
- "default": {}
- },
- "items": {
- "anyOf": [
- { "$ref": "#" },
- { "$ref": "#/definitions/schemaArray" }
- ],
- "default": {}
- },
- "maxItems": { "$ref": "#/definitions/positiveInteger" },
- "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
- "uniqueItems": {
- "type": "boolean",
- "default": false
- },
- "maxProperties": { "$ref": "#/definitions/positiveInteger" },
- "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
- "required": { "$ref": "#/definitions/stringArray" },
- "additionalProperties": {
- "anyOf": [
- { "type": "boolean" },
- { "$ref": "#" }
- ],
- "default": {}
- },
- "definitions": {
- "type": "object",
- "additionalProperties": { "$ref": "#" },
- "default": {}
- },
- "properties": {
- "type": "object",
- "additionalProperties": { "$ref": "#" },
- "default": {}
- },
- "patternProperties": {
- "type": "object",
- "additionalProperties": { "$ref": "#" },
- "default": {}
- },
- "dependencies": {
- "type": "object",
- "additionalProperties": {
- "anyOf": [
- { "$ref": "#" },
- { "$ref": "#/definitions/stringArray" }
- ]
- }
- },
- "enum": {
- "type": "array",
- "minItems": 1,
- "uniqueItems": true
- },
- "type": {
- "anyOf": [
- { "$ref": "#/definitions/simpleTypes" },
- {
- "type": "array",
- "items": { "$ref": "#/definitions/simpleTypes" },
- "minItems": 1,
- "uniqueItems": true
- }
- ]
- },
- "allOf": { "$ref": "#/definitions/schemaArray" },
- "anyOf": { "$ref": "#/definitions/schemaArray" },
- "oneOf": { "$ref": "#/definitions/schemaArray" },
- "not": { "$ref": "#" }
- },
- "dependencies": {
- "exclusiveMaximum": [ "maximum" ],
- "exclusiveMinimum": [ "minimum" ]
- },
- "default": {}
-}
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/operations/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/operations/__init__.py
deleted file mode 100644
index 3c07783e..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/operations/__init__.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""
- jsonspec.operations
- ~~~~~~~~~~~~~~~~~~~
-"""
-
-from __future__ import absolute_import
-
-__all__ = ['check', 'remove', 'add', 'replace', 'move', 'copy',
- 'Error', 'NonexistentTarget', 'Target']
-
-from .exceptions import Error, NonexistentTarget
-from .bases import Target
-
-
-def check(doc, pointer, expected, raise_onerror=False):
- """Check if value exists into object.
-
- :param doc: the document base
- :param pointer: the path to search in
- :param expected: the expected value
- :param raise_onerror: should raise on error?
- :return: boolean
- """
- return Target(doc).check(pointer, expected, raise_onerror)
-
-
-def remove(doc, pointer):
- """Remove element from sequence, member from mapping.
-
- :param doc: the document base
- :param pointer: the path to search in
- :return: the new object
- """
-
- return Target(doc).remove(pointer).document
-
-
-def add(doc, pointer, value):
- """Add element to sequence, member to mapping.
-
- :param doc: the document base
- :param pointer: the path to add in it
- :param value: the new value
- :return: the new object
- """
- return Target(doc).add(pointer, value).document
-
-
-def replace(doc, pointer, value):
- """Replace element from sequence, member from mapping.
-
- :param doc: the document base
- :param pointer: the path to search in
- :param value: the new value
- :return: the new object
-
- .. note::
-
- This operation is functionally identical to a "remove" operation for
- a value, followed immediately by an "add" operation at the same
- location with the replacement value.
- """
-
- return Target(doc).replace(pointer, value).document
-
-
-def move(doc, dest, src):
- """Move element from sequence, member from mapping.
-
- :param doc: the document base
- :param dest: the destination
- :type dest: Pointer
- :param src: the source
- :type src: Pointer
- :return: the new object
-
- .. note::
-
-        the value is deleted first, then added to the new location,
-        so dest must refer to a location in the intermediate document.
-
- """
-
- return Target(doc).move(dest, src).document
-
-
-def copy(doc, dest, src):
- """Copy element from sequence, member from mapping.
-
- :param doc: the document base
- :param dest: the destination
- :type dest: Pointer
- :param src: the source
- :type src: Pointer
- :return: the new object
- """
-
- return Target(doc).copy(dest, src).document
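
Taken together these helpers give RFC 6902-style operations over plain
Python objects; a short sketch of how they compose, using only the
functions defined above (document values are illustrative)::

    from jsonspec.operations import add, check, copy, remove, replace

    doc = {'foo': ['bar', 'baz']}
    doc = add(doc, '/foo/1', 'qux')       # {'foo': ['bar', 'qux', 'baz']}
    doc = replace(doc, '/foo/0', 'quux')  # {'foo': ['quux', 'qux', 'baz']}
    doc = remove(doc, '/foo/2')           # {'foo': ['quux', 'qux']}
    doc = copy(doc, '/backup', '/foo')    # destination first, then source
    assert check(doc, '/backup/0', 'quux')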
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/operations/bases.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/operations/bases.py
deleted file mode 100644
index 8c68edb0..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/operations/bases.py
+++ /dev/null
@@ -1,203 +0,0 @@
-"""
- jsonspec.operations.bases
- ~~~~~~~~~~~~~~~~~~~~~~~~~
-"""
-
-from __future__ import absolute_import
-
-__all__ = ['Target']
-
-from copy import deepcopy
-import logging
-from jsonspec.pointer import Pointer
-from collections import Mapping, MutableSequence
-from jsonspec.pointer import ExtractError, OutOfBounds, OutOfRange, LastElement
-from .exceptions import Error, NonexistentTarget
-logger = logging.getLogger(__name__)
-
-
-class Target(object):
- """
-
- :ivar document: the document base
- """
-
- def __init__(self, document):
- self.document = document
-
- def check(self, pointer, expected, raise_onerror=False):
- """Check if value exists into object.
-
- :param pointer: the path to search in
- :param expected: the expected value
- :param raise_onerror: should raise on error?
- :return: boolean
- """
- obj = self.document
- for token in Pointer(pointer):
- try:
- obj = token.extract(obj, bypass_ref=True)
- except ExtractError as error:
- if raise_onerror:
- raise Error(*error.args)
- logger.exception(error)
- return False
- return obj == expected
-
- def remove(self, pointer):
- """Remove element from sequence, member from mapping.
-
- :param pointer: the path to search in
- :return: resolved document
- :rtype: Target
- """
- doc = deepcopy(self.document)
- parent, obj = None, doc
- try:
- # fetching
- for token in Pointer(pointer):
- parent, obj = obj, token.extract(obj, bypass_ref=True)
-
- # removing
- if isinstance(parent, Mapping):
- del parent[token]
-
- if isinstance(parent, MutableSequence):
- parent.pop(int(token))
- except Exception as error:
- raise Error(*error.args)
-
- return Target(doc)
-
- def add(self, pointer, value):
- """Add element to sequence, member to mapping.
-
- :param pointer: the path to add in it
- :param value: the new value
- :return: resolved document
- :rtype: Target
-
-
- The pointer must reference one of:
-
- - The root of the target document - whereupon the specified value
- becomes the entire content of the target document.
-
- - A member to add to an existing mapping - whereupon the supplied
- value is added to that mapping at the indicated location. If the
- member already exists, it is replaced by the specified value.
-
- - An element to add to an existing sequence - whereupon the supplied
- value is added to the sequence at the indicated location.
- Any elements at or above the specified index are shifted one
- position to the right.
-      The specified index must not be greater than the number of elements
- in the sequence.
- If the "-" character is used to index the end of the sequence, this
- has the effect of appending the value to the sequence.
-
- """
- doc = deepcopy(self.document)
- parent, obj = None, doc
- try:
- for token in Pointer(pointer):
- parent, obj = obj, token.extract(obj, bypass_ref=True)
- else:
- if isinstance(parent, MutableSequence):
- raise OutOfRange(parent)
- if isinstance(parent, Mapping):
- raise OutOfBounds(parent)
-                raise Error('already set')
- except (OutOfBounds, OutOfRange, LastElement) as error:
- if not token.last:
- raise NonexistentTarget(obj)
- value = deepcopy(value)
- if isinstance(error, OutOfBounds):
- error.obj[str(token)] = value
- elif isinstance(error, OutOfRange):
- error.obj.insert(int(token), value)
- elif isinstance(error, LastElement):
- error.obj.append(value)
-
- return Target(doc)
-
- def replace(self, pointer, value):
- """Replace element from sequence, member from mapping.
-
- :param pointer: the path to search in
- :param value: the new value
- :return: resolved document
- :rtype: Target
- """
- doc = deepcopy(self.document)
- parent, obj = None, doc
- try:
- # fetching
- for token in Pointer(pointer):
- parent, obj = obj, token.extract(obj, bypass_ref=True)
-
- # replace
- value = deepcopy(value)
- if isinstance(parent, Mapping):
- parent[token] = value
-
- if isinstance(parent, MutableSequence):
- parent[int(token)] = value
- except Exception as error:
- raise Error(*error.args)
-
- return Target(doc)
-
- def move(self, dest, src):
- """Move element from sequence, member from mapping.
-
- :param dest: the destination
- :type dest: Pointer
- :param src: the source
- :type src: Pointer
- :return: resolved document
- :rtype: Target
-
- .. note::
-
- This operation is functionally identical to a "remove" operation on
- the "from" location, followed immediately by an "add" operation at
- the target location with the value that was just removed.
-
- The "from" location MUST NOT be a proper prefix of the "path"
-        location; i.e., a location cannot be moved into one of its children.
-
- """
-
- doc = deepcopy(self.document)
-
- # delete
- parent, fragment = None, doc
- for token in Pointer(src):
- parent, fragment = fragment, token.extract(fragment,
- bypass_ref=True)
-
- if isinstance(parent, Mapping):
- del parent[token]
-
- if isinstance(parent, MutableSequence):
- parent.pop(int(token))
-
- # insert
- return Target(doc).add(dest, fragment)
-
- def copy(self, dest, src):
- """Copy element from sequence, member from mapping.
-
- :param dest: the destination
- :type dest: Pointer
- :param src: the source
- :type src: Pointer
- :return: resolved document
- :rtype: Target
- """
- doc = fragment = deepcopy(self.document)
- for token in Pointer(src):
- fragment = token.extract(fragment, bypass_ref=True)
-
- return Target(doc).add(dest, fragment)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/operations/exceptions.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/operations/exceptions.py
deleted file mode 100644
index 528a101b..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/operations/exceptions.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
- jsonspec.operations.exceptions
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-"""
-
-from __future__ import absolute_import
-
-__all__ = ['Error', 'NonexistentTarget']
-
-
-class Error(LookupError):
- pass
-
-
-class NonexistentTarget(Error):
- """Raised when trying to get a non existent target"""
- pass
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/__init__.py
deleted file mode 100644
index 8ff45517..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
- jsonspec.pointer
-    ~~~~~~~~~~~~~~~~
-
- JSON Pointer defines a string syntax for identifying a specific value
- within a JavaScript Object Notation (JSON) document.
-
-"""
-
-from __future__ import absolute_import, print_function, unicode_literals
-
-__all__ = ['extract', 'stage', 'DocumentPointer', 'Pointer', 'PointerToken',
- 'ExtractError', 'RefError', 'LastElement', 'OutOfBounds', 'OutOfRange'] # noqa
-
-import logging
-from .bases import DocumentPointer, Pointer, PointerToken
-from .exceptions import ExtractError, RefError, LastElement, OutOfBounds, OutOfRange, WrongType, UnstagedError, ParseError # noqa
-from .stages import stage
-
-logger = logging.getLogger(__name__)
-
-
-def extract(obj, pointer, bypass_ref=False):
- """Extract member or element of obj according to pointer.
-
- :param obj: the object source
- :param pointer: the pointer
- :type pointer: Pointer, str
- :param bypass_ref: bypass JSON Reference event
- :type bypass_ref: boolean
- """
-
- return Pointer(pointer).extract(obj, bypass_ref)
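
A minimal usage sketch for extract(), assuming the package is importable::

    from jsonspec.pointer import ExtractError, extract

    obj = {'foo': ['bar', 'baz']}
    assert extract(obj, '/foo/1') == 'baz'
    try:
        extract(obj, '/foo/9')
    except ExtractError:
        pass  # index 9 is out of range for the sequence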
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/bases.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/bases.py
deleted file mode 100644
index c7328676..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/bases.py
+++ /dev/null
@@ -1,265 +0,0 @@
-"""
- jsonspec.pointer.bases
- ~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-
-__all__ = ['DocumentPointer', 'Pointer', 'PointerToken']
-
-import logging
-from abc import abstractmethod, ABCMeta
-from six import add_metaclass, string_types
-from collections import Mapping, Sequence, MutableSequence
-from .exceptions import ExtractError, RefError, LastElement, OutOfBounds, OutOfRange, WrongType, UnstagedError, ParseError # noqa
-
-logger = logging.getLogger(__name__)
-
-
-class DocumentPointer(object):
- """Defines a document pointer
-
- :ivar document: document name
- :ivar pointer: pointer
- """
-
- def __init__(self, pointer):
- """
- :param pointer: a string or DocumentPointer instance
- """
- if isinstance(pointer, DocumentPointer):
- document, path = pointer
- elif '#' not in pointer:
- logger.debug('# is missing %r', pointer)
- document, path = pointer, ''
- else:
- document, path = pointer.split('#', 1)
- self.document = document
- self.pointer = Pointer(path)
-
- def extract(self, obj, bypass_ref=False):
- """
- Extract subelement from obj, according to pointer.
-        It assumes that the document is the object.
-
- :param obj: the object source
- :param bypass_ref: disable JSON Reference errors
- """
- return self.pointer.extract(obj, bypass_ref)
-
- def is_inner(self):
- """Tells if pointer refers to an inner document
- """
- return self.document == ''
-
- def endswith(self, txt):
- """used by os.path.join"""
- return str(self).endswith(txt)
-
- def __iadd__(self, txt):
- """append fragments"""
- data = str(self) + txt
- return DocumentPointer(data)
-
- def __iter__(self):
- """Return document and pointer.
- """
- return iter([self.document, self.pointer])
-
- def __eq__(self, other):
- if isinstance(other, string_types):
- return other == self.__str__()
-        return super(DocumentPointer, self).__eq__(other)
-
- def __str__(self):
- return '{}#{}'.format(self.document, self.pointer)
-
- def __repr__(self):
-        return '<{}({!r})>'.format(self.__class__.__name__, self.__str__())
-
-
-@add_metaclass(ABCMeta)
-class PointerToken(str):
- """
- A single token
- """
-
- @abstractmethod
- def extract(self, obj, bypass_ref=False):
- """
- Extract parents or subelement from obj, according to current token.
-
- :param obj: the object source
- :param bypass_ref: disable JSON Reference errors
- """
- pass
-
-
-class StagesToken(PointerToken):
- """
- A parent token
- """
-
- def __init__(self, value, *args, **kwargs):
- value = str(value)
- member = False
- if value.endswith('#'):
- value = value[:-1]
- member = True
- self.stages = int(value)
- self.member = member
-
- def extract(self, obj, bypass_ref=False):
- """
- Extract parent of obj, according to current token.
-
- :param obj: the object source
- :param bypass_ref: not used
- """
- for i in range(0, self.stages):
- try:
- obj = obj.parent_obj
- except AttributeError:
- raise UnstagedError(obj, '{!r} must be staged before '
- 'exploring its parents'.format(obj))
- if self.member:
- return obj.parent_member
- return obj
-
-
-class ChildToken(PointerToken):
- """
- A child token
- """
- def extract(self, obj, bypass_ref=False):
- """
- Extract subelement from obj, according to current token.
-
- :param obj: the object source
- :param bypass_ref: disable JSON Reference errors
- """
- try:
- if isinstance(obj, Mapping):
- if not bypass_ref and '$ref' in obj:
- raise RefError(obj, 'presence of a $ref member')
- obj = self.extract_mapping(obj)
- elif isinstance(obj, Sequence) and not isinstance(obj, string_types):
- obj = self.extract_sequence(obj)
- else:
- raise WrongType(obj, '{!r} does not apply '
- 'for {!r}'.format(str(self), obj))
-
- if isinstance(obj, Mapping):
- if not bypass_ref and '$ref' in obj:
- raise RefError(obj, 'presence of a $ref member')
- return obj
- except ExtractError as error:
- logger.exception(error)
- raise
- except Exception as error:
- logger.exception(error)
- args = [arg for arg in error.args if arg not in (self, obj)]
- raise ExtractError(obj, *args)
-
- def extract_mapping(self, obj):
- if self in obj:
- return obj[self]
-
- if self.isdigit():
- key = int(self)
- if key in obj:
- return obj[key]
-
- raise OutOfBounds(obj, 'member {!r} not found'.format(str(self)))
-
- def extract_sequence(self, obj):
- if self == '-':
- raise LastElement(obj, 'last element is needed')
- if not self.isdigit():
- raise WrongType(obj, '{!r} does not apply '
- 'for sequence'.format(str(self)))
- try:
- return obj[int(self)]
- except IndexError:
- raise OutOfRange(obj, 'element {!r} not found'.format(str(self)))
-
- def __repr__(self):
- return '<{}({!r})>'.format(self.__class__.__name__, str(self))
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/exceptions.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/exceptions.py
deleted file mode 100644
index a840e6a4..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/exceptions.py
+++ /dev/null
@@ -1,72 +0,0 @@
-"""
- jsonspec.pointer.exceptions
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-__all__ = ['ExtractError', 'RefError', 'LastElement', 'OutOfBounds',
- 'OutOfRange', 'WrongType', 'UnstagedError']
-
-
-class ParseError(ValueError):
- """Raised when pointer is not well formatted.
-
- :ivar pointer: the faulty pointer
- """
-
- def __init__(self, pointer, *args):
- super(ParseError, self).__init__(*args)
- self.pointer = pointer
-
-
-class ExtractError(Exception):
- """Raised for any errors.
-
- :ivar obj: the object that raised this event
- """
-
- def __init__(self, obj, *args):
- super(ExtractError, self).__init__(*args)
- self.obj = obj
-
-
-class RefError(ExtractError):
- """Raised when encoutered a JSON Ref.
-
- :ivar obj: the object that raised this event
- """
-
-
-class WrongType(ExtractError, ValueError):
- """Raised when a member or a sequence is needed.
-
- :ivar obj: the object that raised this event
- """
-
-
-class OutOfBounds(ExtractError, KeyError):
- """Raised when a member of a mapping does not exists.
-
- :ivar obj: the object that raised this event
- """
-
-
-class OutOfRange(ExtractError, IndexError):
- """Raised when an element of a sequence does not exists.
-
- :ivar obj: the object that raised this event
- """
-
-
-class LastElement(ExtractError):
- """Raised when refers to the last element of a sequence.
-
- :ivar obj: the object that raised this event
- """
-
-
-class UnstagedError(ExtractError, ValueError):
- """Raised when obj is not staged.
-
- :ivar obj: the object that raised this event
- """
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/stages.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/stages.py
deleted file mode 100644
index dc8f28fe..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/pointer/stages.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
- jsonspec.pointer.stages
- ~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-from six import string_types
-from collections import Mapping, Sequence, Set
-
-class Staged(object):
- obj = None
- parent_obj = None
- parent_member = None
-
- def __init__(self, obj, parent=None, member=None):
- self.obj = obj
- self.parent_obj = parent
- self.parent_member = member
-
- def __getattribute__(self, name):
- if name in ('obj', 'parent_obj', 'parent_member'):
- return object.__getattribute__(self, name)
- return getattr(object.__getattribute__(self, 'obj'), name)
-
- def __delattr__(self, name):
- delattr(object.__getattribute__(self, 'obj'), name)
-
- def __setattr__(self, name, value):
- if name in ('obj', 'parent_obj', 'parent_member'):
- object.__setattr__(self, name, value)
- else:
- setattr(object.__getattribute__(self, 'obj'), name, value)
-
- def __iter__(self):
- return object.__getattribute__(self, 'obj').__iter__()
-
- def __getitem__(self, key):
- value = object.__getattribute__(self, 'obj').__getitem__(key)
- return Staged(value, self, key)
-
- def __len__(self):
- return object.__getattribute__(self, 'obj').__len__()
-
- def __eq__(self, other):
- return object.__getattribute__(self, 'obj') == other
-
- def __str__(self):
- return object.__getattribute__(self, 'obj').__str__()
-
-
-def stage(obj, parent=None, member=None):
- """
- Prepare obj to be staged.
-
- This is almost used for relative JSON Pointers.
- """
- obj = Staged(obj, parent, member)
-
- if isinstance(obj, Mapping):
- for key, value in obj.items():
- stage(value, obj, key)
- elif isinstance(obj, Sequence) and not isinstance(obj, string_types):
- for index, value in enumerate(obj):
- stage(value, obj, index)
- elif isinstance(obj, Set):
- for value in obj:
- stage(value, obj, None)
-
- return obj
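
A sketch of what staging buys, based only on the attributes defined above:
each wrapped child remembers its parent object and the member under which
it was reached, which is the bookkeeping relative JSON Pointers need::

    doc = stage({'foo': ['bar', 'baz']})

    item = doc['foo'][1]
    assert item == 'baz'                      # compares like its payload
    assert item.parent_member == 1            # member used to reach it
    assert item.parent_obj == ['bar', 'baz']  # parent is a Staged too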
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/__init__.py
deleted file mode 100644
index d4af9613..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/__init__.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
- jsonspec.reference
- ~~~~~~~~~~~~~~~~~~
-
- A JSON Reference is a JSON object, which contains a member named
- "$ref", which has a JSON string value. Example:
-
- { "$ref": "http://example.com/example.json#/foo/bar" }
-
- If a JSON value does not have these characteristics, then it SHOULD
- NOT be interpreted as a JSON Reference.
-
-"""
-
-from __future__ import absolute_import, print_function, unicode_literals
-
-from .bases import Registry, LocalRegistry
-from .exceptions import NotFound, Forbidden
-from jsonspec.pointer import DocumentPointer
-
-__all__ = ['resolve', 'Registry', 'LocalRegistry', 'NotFound', 'Forbidden']
-
-
-def resolve(obj, pointer, registry=None):
- """resolve a local object
-
- :param obj: the local object.
- :param pointer: the pointer
- :type pointer: DocumentPointer, str
- :param registry: the registry.
-                     It may be omitted if inner JSON References
-                     don't refer to other documents.
- :type registry: Provider, dict
-
- .. warning::
-
-        Once the pointer is extracted, it won't follow sub-mappings or elements!
- For example, the value of::
-
- value = resolve({
- 'foo': {'$ref': '#/bar'},
- 'bar': [{'$ref': '#/baz'}],
- 'baz': 'quux',
- }, '#/foo')
-
- is::
-
- assert value == [{'$ref': '#/baz'}]
-
- and not::
-
- assert value == ['quux']
-
- """
-
- registry = LocalRegistry(obj, registry or {})
- local = DocumentPointer(pointer)
-
- if local.document:
- registry[local.document] = obj
- local.document = ''
- return registry.resolve(local)
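
A complementary sketch for the common case: once the pointer lands on a
mapping that carries $ref, resolve() dereferences it locally::

    obj = {
        'foo': {'$ref': '#/bar'},
        'bar': 'quux',
    }
    assert resolve(obj, '#/foo') == 'quux'  # the $ref at /foo is followed
    assert resolve(obj, '#/bar') == 'quux'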
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/bases.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/bases.py
deleted file mode 100644
index 3815fde8..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/bases.py
+++ /dev/null
@@ -1,119 +0,0 @@
-"""
- jsonspec.reference.bases
- ~~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-import logging
-from .exceptions import NotFound, Forbidden
-from .util import ref, MutableMapping, Mapping
-from jsonspec.pointer import DocumentPointer
-
-__all__ = ['LocalRegistry', 'Registry']
-
-logger = logging.getLogger(__name__)
-
-
-class Provider(Mapping):
- """Defines a generic way to provide external documents"""
- pass
-
-
-class Registry(Provider, MutableMapping):
- """Register all documents.
-
- :ivar provider: all documents
-    :vartype provider: Provider, dict
- """
-
- def __init__(self, provider=None):
- self.provider = provider or {}
- super(Registry, self).__init__()
-
- def prototype(self, dp):
- obj = self[dp.document]
- return self[dp.document], LocalRegistry(obj, self)
-
- def resolve(self, pointer):
- """Resolve from documents.
-
- :param pointer: foo
- :type pointer: DocumentPointer
- """
-
- dp = DocumentPointer(pointer)
- obj, fetcher = self.prototype(dp)
-
- for token in dp.pointer:
- obj = token.extract(obj, bypass_ref=True)
- reference = ref(obj)
- if reference:
- obj = fetcher.resolve(reference)
- return obj
-
- def __getitem__(self, uri):
- try:
- return self.provider[uri]
- except KeyError:
- raise NotFound('{!r} not registered'.format(uri))
-
- def __setitem__(self, uri, obj):
- self.provider[uri] = obj
-
- def __delitem__(self, uri):
- del self.provider[uri]
-
- def __len__(self):
- return len(self.provider)
-
- def __iter__(self):
- return iter(self.provider)
-
-
-class LocalRegistry(Registry):
- """Scoped registry to a local document.
-
- :ivar doc: the local document
- :ivar provider: all documents
-    :vartype provider: Provider, dict
- :ivar key: current document identifier
-
- """
-
- key = ''
-
- def __init__(self, doc, provider=None):
- self.doc = doc
- self.provider = provider or {}
-
- def prototype(self, dp):
- if dp.is_inner():
- return self.doc, self
- else:
- obj = self[dp.document]
- return self[dp.document], LocalRegistry(obj, self)
-
- def __getitem__(self, uri):
- try:
- return self.doc if uri == self.key else self.provider[uri]
- except (NotFound, KeyError):
- raise NotFound('{!r} not registered'.format(uri))
-
- def __setitem__(self, uri, obj):
- if uri == self.key:
- raise Forbidden('setting {} is forbidden'.format(self.key))
- if uri not in self.provider:
- self.provider[uri] = obj
-
- def __delitem__(self, uri):
- if uri == self.key:
- raise Forbidden('deleting {} is forbidden'.format(self.key))
- del self.provider[uri]
-
- def __len__(self):
- return len(set(list(self.provider.keys()) + [self.key]))
-
- def __iter__(self):
- yield self.key
- for key in self.provider.keys():
- yield key
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/exceptions.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/exceptions.py
deleted file mode 100644
index 551d9a09..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/exceptions.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""
- jsonspec.reference.exceptions
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-
-class NotFound(Exception):
-    """Raised when a document is not found"""
-    pass
-
-
-class Forbidden(Exception):
-    """Raised when trying to replace a document"""
-    pass
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/providers.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/providers.py
deleted file mode 100644
index 556f6fae..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/providers.py
+++ /dev/null
@@ -1,191 +0,0 @@
-"""
- jsonspec.reference.providers
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-import json
-import logging
-import os
-import pkg_resources
-from .bases import Provider
-from .exceptions import NotFound
-from pathlib import Path
-
-__all__ = ['Provider', 'FilesystemProvider', 'PkgProvider', 'SpecProvider']
-
-logger = logging.getLogger(__name__)
-
-
-class PkgProvider(Provider):
- """
-    Autoload providers declared in setuptools ``entry_points``.
-
- For example, with this setup.cfg:
-
- .. code-block:: ini
-
- [entry_points]
- jsonspec.reference.contributions =
- spec = jsonspec.misc.providers:SpecProvider
-
- """
-
- namespace = 'jsonspec.reference.contributions'
-
- def __init__(self, namespace=None, configuration=None):
- self.namespace = namespace or self.namespace
- self.configuration = configuration or {}
- self.loaded = False
-
- def load(self):
- providers = {}
- for entrypoint in pkg_resources.iter_entry_points(self.namespace):
- kwargs = self.configuration.get(entrypoint.name, {})
- providers[entrypoint.name] = entrypoint.load()(**kwargs)
- logger.debug('loaded %s from %s', entrypoint, entrypoint.dist)
- self.providers = providers
- self.loaded = True
-
- def __getitem__(self, uri):
- if not self.loaded:
- self.load()
-
- for name, provider in self.providers.items():
- try:
- value = provider[uri]
- logger.info('got %s from %s', uri, name)
- return value
- except (KeyError, NotFound):
- pass
- raise NotFound('no providers could return {!r}'.format(uri))
-
- def __iter__(self):
- if not self.loaded:
- self.load()
- for name in self.providers.keys():
- yield name
-
- def __len__(self):
- if not self.loaded:
- self.load()
- return len(self.providers)
-
-
-class FilesystemProvider(Provider):
- """
-    Exposes JSON documents stored in the filesystem.
-
-    For example, with ``prefix=my:pref:`` and ``directory=my/directory``,
- this filesystem will be loaded as::
-
- my/directory/
- foo.json -> my:pref:foo#
- bar.json -> my:pref:bar#
- baz/
- quux.json -> my:pref:baz/quux#
-
- """
-
- def __init__(self, directory, prefix=None, aliases=None):
- self.directory = directory
- self.prefix = prefix or ''
- self.loaded = False
- self.aliases = aliases or {}
-
- def _spec_name(self, schema, filename):
- # Let's assume the schema knows its name more accurately than
- # its path can provide.
- if schema.get('id'):
- return schema['id']
- else:
- return filename.as_posix()[len(self.directory):-5].lstrip('/')
-
- @property
- def data(self):
- if not self.loaded:
- data = {}
-
- for filename in Path(self.directory).glob('**/*.json'):
- with filename.open() as file:
- schema = json.load(file)
-
- # Let's assume the schema knows its name more accurately than
- # its path can provide.
- spec = self._spec_name(schema, filename)
- data[spec] = schema
- # set the fallbacks
- for spec in sorted(data.keys(), reverse=True):
- if spec.startswith('draft-'):
- metaspec = spec.split('/', 1)[1]
- if metaspec not in data:
- data[metaspec] = data[spec]
-
- self._data = data
- self.loaded = True
- return self._data
-
- def __getitem__(self, uri):
- spec = uri
- if uri.startswith(self.prefix):
- spec = uri[len(self.prefix):]
- if spec.endswith('#'):
- spec = spec[:-1]
-
- spec = self.aliases.get(spec, spec)
- try:
- return self.data[spec]
- except (KeyError, UnboundLocalError):
- raise NotFound(uri)
-
- def __iter__(self):
- for spec in self.data.keys():
- yield '{}{}#'.format(self.prefix, spec)
-
- def __len__(self):
- return len(self.data.keys())
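
A usage sketch for the provider above, assuming a my/directory tree laid
out as in the class docstring::

    provider = FilesystemProvider('my/directory', prefix='my:pref:')

    schema = provider['my:pref:baz/quux#']  # loads my/directory/baz/quux.json
    for uri in provider:                    # yields 'my:pref:foo#', ...
        print(uri)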
-
-
-class SpecProvider(FilesystemProvider):
- """
- Provides specs of http://json-schema.org/
- """
-
- def __init__(self):
- from jsonspec.misc import __file__ as misc
- base = os.path.realpath(os.path.dirname(misc))
- src = os.path.join(base, 'schemas/')
- prefix = 'http://json-schema.org/'
- super(SpecProvider, self).__init__(src, prefix, aliases={
- 'hyper-schema': 'draft-04/hyper-schema',
- 'schema': 'draft-04/schema'
- })
-
- def _spec_name(self, schema, filename):
- return filename.as_posix()[len(self.directory):-5].lstrip('/')
-
-
-class ProxyProvider(Provider):
- def __init__(self, provider):
- self.provider = provider
- self.local = {}
-
- def __getitem__(self, uri):
- try:
- return self.local[uri]
- except KeyError:
- return self.provider[uri]
-
- def __setitem__(self, uri, schema):
- self.local[uri] = schema
-
- def __iter__(self):
- keys = set(self.local.keys())
- keys.update(self.provider.keys())
- for key in sorted(keys):
- yield key
-
- def __len__(self):
- keys = set(self.local.keys())
- keys.update(self.provider.keys())
- return len(keys)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/util.py
deleted file mode 100644
index 8ccd277a..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/reference/util.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""
- jsonspec.reference.util
- ~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-__all__ = ['ref', 'Mapping', 'MutableMapping']
-
-
-def ref(obj):
- """Extracts $ref of object."""
- try:
- return obj['$ref']
- except (KeyError, TypeError):
- return None
-
-
-try:
- # py3
- from collections.abc import MutableMapping, Mapping
-except ImportError:
- # py2
- from collections import MutableMapping, Mapping
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/__init__.py
deleted file mode 100644
index 18004519..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/__init__.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
- jsonspec.validators
- ~~~~~~~~~~~~~~~~~~~
-
-"""
-
-from .bases import Validator, ReferenceValidator
-from .exceptions import CompilationError, ReferenceError, ValidationError
-from .factorize import register, Factory, Context
-from . import draft04 # noqa
-from . import draft03 # noqa
-from .draft03 import Draft03Validator # noqa
-from .draft04 import Draft04Validator # noqa
-
-__all__ = ['load', 'register', 'Factory', 'Context',
- 'Validator', 'ReferenceValidator',
- 'Draft03Validator', 'Draft04Validator',
- 'CompilationError', 'ReferenceError', 'ValidationError']
-
-
-def load(schema, uri=None, spec=None, provider=None):
- """Scaffold a validator against a schema.
-
- :param schema: the schema to compile into a Validator
- :type schema: Mapping
- :param uri: the uri of the schema.
-                It may be omitted when the schema does not
-                cross-reference other documents.
- :type uri: Pointer, str
-    :param spec: fallback to this spec if the schema does not provide its own
- :type spec: str
- :param provider: the other schemas, in case of cross
- referencing
- :type provider: Mapping, Provider...
- """
- factory = Factory(provider, spec)
- return factory(schema, uri or '#')
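
A minimal sketch of compiling and running a validator with load(); the
schema here is hypothetical, and with no spec given the draft-04 fallback
shipped above presumably applies::

    from jsonspec.validators import ValidationError, load

    validator = load({
        'type': 'object',
        'properties': {'name': {'type': 'string'}},
        'required': ['name'],
    })

    assert validator({'name': 'paloalto'}) == {'name': 'paloalto'}
    try:
        validator({})
    except ValidationError as error:
        print(error.flatten())  # pointer -> reasons, as the CLI prints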
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/bases.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/bases.py
deleted file mode 100644
index f0d67e45..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/bases.py
+++ /dev/null
@@ -1,103 +0,0 @@
-"""
- jsonspec.validators.bases
- ~~~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-from __future__ import absolute_import
-
-import logging
-from abc import abstractmethod, ABCMeta
-from six import add_metaclass
-from jsonspec.pointer import DocumentPointer
-from .exceptions import ValidationError
-
-__all__ = ['ValidationError', 'Validator', 'ReferenceValidator']
-
-logger = logging.getLogger(__name__)
-
-
-@add_metaclass(ABCMeta)
-class Validator(object):
- """
- The mother of Validators.
- """
- #: indicates current uri
- uri = None
-
- default = None
-
- def __init__(self, **attrs):
- self.uri = attrs.pop('uri', None)
-
- @abstractmethod
- def has_default(self):
- pass
-
- @abstractmethod
- def is_optional(self):
- """
- Indicates if the instance must be defined.
- """
- pass
-
- @abstractmethod
- def validate(self, obj, pointer=None):
- """
- Validate object.
-
- :param obj: the object to validate
- :param pointer: the object pointer
- """
- pass
-
- def __call__(self, obj, pointer=None):
- """shortcut for validate()"""
- return self.validate(obj, pointer)
-
-
-class ReferenceValidator(Validator):
- """
-    References a validator by its pointer.
-
- :ivar pointer: the pointer to the validator
- :ivar context: the context object
- :ivar default: return the default validator
- :ivar validator: return the lazy loaded validator
-
- >>> validator = ReferenceValidator('http://json-schema.org/geo#', context)
-    >>> assert validator({
-    ...     'latitude': 0.0124,
-    ...     'longitude': 1.2345
-    ... })
- """
- def __init__(self, pointer, context):
- super(ReferenceValidator, self).__init__()
- self.pointer = DocumentPointer(pointer)
- self.context = context
- self.uri = str(self.pointer)
-
- @property
- def validator(self):
- if not hasattr(self, '_validator'):
- self._validator = self.context.resolve(self.pointer)
- return self._validator
-
- def has_default(self):
- return self.validator.has_default()
-
- @property
- def default(self):
- return self.validator.default
-
- def is_optional(self):
- return self.validator.is_optional()
-
- def validate(self, obj, pointer=None):
- """
- Validate object against validator.
-
- :param obj: the object to validate
- :param pointer: the object pointer
- """
- return self.validator.validate(obj, pointer)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/draft03.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/draft03.py
deleted file mode 100644
index b08f15d8..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/draft03.py
+++ /dev/null
@@ -1,665 +0,0 @@
-"""
- jsonspec.validators.draft03
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Implements JSON Schema draft03.
-"""
-
-from __future__ import absolute_import
-
-import logging
-import re
-from copy import deepcopy
-from decimal import Decimal
-from six import integer_types, string_types
-from six.moves.urllib.parse import urljoin
-from .bases import ReferenceValidator, Validator
-from .exceptions import CompilationError
-from .factorize import register
-from jsonspec.validators.exceptions import ValidationError
-from jsonspec.validators.util import uncamel
-from jsonspec.validators.pointer_util import pointer_join
-from jsonspec import driver as json
-
-__all__ = ['compile', 'Draft03Validator']
-
-sequence_types = (list, set, tuple)
-number_types = (integer_types, float, Decimal)
-logger = logging.getLogger(__name__)
-
-
-@register(spec='http://json-schema.org/draft-03/schema#')
-def compile(schema, pointer, context, scope=None):
- """
- Compiles schema with `JSON Schema`_ draft-03.
-
- :param schema: obj to compile
- :type schema: Mapping
- :param pointer: uri of the schema
- :type pointer: Pointer, str
- :param context: context of this schema
- :type context: Context
-
- .. _`JSON Schema`: http://json-schema.org
- """
-
- schm = deepcopy(schema)
-
- scope = urljoin(scope or str(pointer), schm.pop('id', None))
-
- if '$ref' in schema:
- return ReferenceValidator(urljoin(scope, schema['$ref']), context)
-
- attrs = {}
-
- if 'additionalItems' in schm:
- subpointer = pointer_join(pointer, 'additionalItems')
- attrs['additional_items'] = schm.pop('additionalItems')
- if isinstance(attrs['additional_items'], dict):
- compiled = compile(attrs['additional_items'],
- subpointer,
- context,
- scope)
- attrs['additional_items'] = compiled
- elif not isinstance(attrs['additional_items'], bool):
- raise CompilationError('wrong type for {}'.format('additional_items'), schema) # noqa
-
- if 'additionalProperties' in schm:
- attrs['additional_properties'] = schm.pop('additionalProperties')
- if isinstance(attrs['additional_properties'], dict):
- subpointer = pointer_join(pointer, 'additionalProperties')
- value = attrs['additional_properties']
- attrs['additional_properties'] = compile(value,
- subpointer,
- context,
- scope)
- elif not isinstance(attrs['additional_properties'], bool):
- raise CompilationError('additionalProperties must be an object or boolean', schema) # noqa
-
- if 'dependencies' in schm:
- attrs['dependencies'] = schm.pop('dependencies')
- if not isinstance(attrs['dependencies'], dict):
- raise CompilationError('dependencies must be an object', schema)
- for key, value in attrs['dependencies'].items():
- if isinstance(value, dict):
- subpointer = pointer_join(pointer, 'dependencies', key)
- attrs['dependencies'][key] = compile(value,
- subpointer,
- context,
- scope)
- elif isinstance(value, sequence_types):
- continue
- elif not isinstance(value, string_types):
- raise CompilationError('dependencies must be an array, object or string', schema) # noqa
-
- if 'disallow' in schm:
- attrs['disallow'] = schm.pop('disallow')
- if isinstance(attrs['disallow'], sequence_types):
- for index, value in enumerate(attrs['disallow']):
- if isinstance(value, dict):
- subpointer = pointer_join(pointer, 'disallow', index)
- attrs['disallow'][index] = compile(value,
- subpointer,
- context,
- scope)
- elif not isinstance(value, string_types):
- raise CompilationError('disallow must be an object or string', schema) # noqa
- elif not isinstance(attrs['disallow'], string_types):
- raise CompilationError('disallow must be an array or string', schema) # noqa
-
- if 'divisibleBy' in schm:
- attrs['divisible_by'] = schm.pop('divisibleBy')
- if not isinstance(attrs['divisible_by'], number_types):
- raise CompilationError('divisibleBy must be a number', schema)
-
- if 'enum' in schm:
- attrs['enum'] = schm.pop('enum')
- if not isinstance(attrs['enum'], sequence_types):
- raise CompilationError('enum must be a sequence', schema)
-
- if 'exclusiveMaximum' in schm:
- attrs['exclusive_maximum'] = schm.pop('exclusiveMaximum')
- if not isinstance(attrs['exclusive_maximum'], bool):
- raise CompilationError('exclusiveMaximum must be a boolean', schema) # noqa
-
- if 'exclusiveMinimum' in schm:
- attrs['exclusive_minimum'] = schm.pop('exclusiveMinimum')
- if not isinstance(attrs['exclusive_minimum'], bool):
- raise CompilationError('exclusiveMinimum must be a boolean', schema) # noqa
-
- if 'extends' in schm:
- attrs['extends'] = schm.pop('extends')
- subpointer = pointer_join(pointer, 'extends')
- if isinstance(attrs['extends'], dict):
- attrs['extends'] = compile(attrs['extends'],
- subpointer,
- context,
- scope)
- elif isinstance(attrs['extends'], sequence_types):
- for index, value in enumerate(attrs['extends']):
- attrs['extends'][index] = compile(value,
- subpointer,
- context,
- scope)
- else:
- raise CompilationError('extends must be an object or array', schema) # noqa
-
- if 'format' in schm:
- attrs['format'] = schm.pop('format')
- if not isinstance(attrs['format'], string_types):
- raise CompilationError('format must be a string', schema)
-
- if 'items' in schm:
- subpointer = pointer_join(pointer, 'items')
- attrs['items'] = schm.pop('items')
- if isinstance(attrs['items'], (list, tuple)):
- # each value must be a json schema
- attrs['items'] = [compile(element, subpointer, context, scope) for element in attrs['items']] # noqa
- elif isinstance(attrs['items'], dict):
- # value must be a json schema
- attrs['items'] = compile(attrs['items'], subpointer, context, scope) # noqa
- else:
- # should be a boolean
- raise CompilationError('wrong type for {}'.format('items'), schema) # noqa
-
- if 'maximum' in schm:
- attrs['maximum'] = schm.pop('maximum')
- if not isinstance(attrs['maximum'], number_types):
- raise CompilationError('maximum must be a number', schema)
-
- if 'maxItems' in schm:
- attrs['max_items'] = schm.pop('maxItems')
- if not isinstance(attrs['max_items'], integer_types):
- raise CompilationError('maxItems must be an integer', schema)
-
- if 'maxLength' in schm:
- attrs['max_length'] = schm.pop('maxLength')
- if not isinstance(attrs['max_length'], integer_types):
- raise CompilationError('maxLength must be integer', schema)
-
- if 'minimum' in schm:
- attrs['minimum'] = schm.pop('minimum')
- if not isinstance(attrs['minimum'], number_types):
- raise CompilationError('minimum must be a number', schema)
-
- if 'minItems' in schm:
- attrs['min_items'] = schm.pop('minItems')
- if not isinstance(attrs['min_items'], integer_types):
- raise CompilationError('minItems must be an integer', schema)
-
- if 'minLength' in schm:
- attrs['min_length'] = schm.pop('minLength')
- if not isinstance(attrs['min_length'], integer_types):
- raise CompilationError('minLength must be integer', schema)
-
- if 'pattern' in schm:
- attrs['pattern'] = schm.pop('pattern')
- if not isinstance(attrs['pattern'], string_types):
- raise CompilationError('pattern must be a string', schema)
-
- if 'patternProperties' in schm:
- attrs['pattern_properties'] = schm.pop('patternProperties')
- if not isinstance(attrs['pattern_properties'], dict):
- raise CompilationError('patternProperties must be an object', schema) # noqa
- for name, value in attrs['pattern_properties'].items():
- subpointer = pointer_join(pointer, 'patternProperties', name)
- attrs['pattern_properties'][name] = compile(value,
- subpointer,
- context,
- scope)
-
- if 'properties' in schm:
- attrs['properties'] = schm.pop('properties')
- if not isinstance(attrs['properties'], dict):
- raise CompilationError('properties must be an object', schema)
- for name, value in attrs['properties'].items():
- subpointer = pointer_join(pointer, 'properties', name)
- attrs['properties'][name] = compile(value,
- subpointer,
- context,
- scope)
-
- if 'required' in schm:
- attrs['required'] = schm.pop('required')
- if not isinstance(attrs['required'], bool):
- raise CompilationError('required must be a boolean', schema)
-
- if 'type' in schm:
- attrs['type'] = schm.pop('type')
- if isinstance(attrs['type'], sequence_types):
- for index, value in enumerate(attrs['type']):
- if isinstance(value, dict):
- subpointer = pointer_join(pointer, 'type', index)
- attrs['type'][index] = compile(value,
- subpointer,
- context,
- scope)
- elif not isinstance(value, string_types):
- raise CompilationError('type must be an object or string', schema) # noqa
- elif not isinstance(attrs['type'], string_types):
- raise CompilationError('type must be an array or string', schema) # noqa
-
- if 'uniqueItems' in schm:
- attrs['unique_items'] = schm.pop('uniqueItems')
- if not isinstance(attrs['unique_items'], bool):
- raise CompilationError('uniqueItems must be a boolean', schema)
-
- return Draft03Validator(attrs, scope, context.formats)
-
-
-class Draft03Validator(Validator):
- """
- Implements `JSON Schema`_ draft-03 validation.
-
- :ivar attrs: attributes to validate against
- :ivar uri: uri of the current validator
- :ivar formats: mapping of available formats
-
- >>> validator = Draft03Validator({'min_length': 4})
- >>> assert validator('this is sparta')
-
- .. _`JSON Schema`: http://json-schema.org
- """
-
- def __init__(self, attrs, uri=None, formats=None):
- attrs = {uncamel(k): v for k, v in attrs.items()}
-
- self.attrs = attrs
- self.attrs.setdefault('additional_items', True)
- self.attrs.setdefault('pattern_properties', {})
- self.attrs.setdefault('exclusive_maximum', False)
- self.attrs.setdefault('exclusive_minimum', False)
- self.attrs.setdefault('additional_properties', True)
- self.attrs.setdefault('properties', {})
- self.uri = uri
- self.formats = formats or {}
- self.default = self.attrs.get('default', None)
- self.fail_fast = True
- self.errors = []
-
- def is_array(self, obj):
- return isinstance(obj, sequence_types)
-
- def is_boolean(self, obj):
- return isinstance(obj, bool)
-
- def is_integer(self, obj):
- return isinstance(obj, integer_types) and not isinstance(obj, bool)
-
- def is_null(self, obj):
- return obj is None
-
- def is_number(self, obj):
- return isinstance(obj, number_types) and not isinstance(obj, bool)
-
- def is_object(self, obj):
- return isinstance(obj, dict)
-
- def is_string(self, obj):
- return isinstance(obj, string_types)
-
- def validate(self, obj, pointer=None):
- """
- Validate object against validator
-
- :param obj: the object to validate
- """
-
- pointer = pointer or '#'
-
- validator = deepcopy(self)
- validator.errors = []
- validator.fail_fast = False
-
- obj = deepcopy(obj)
- obj = validator.validate_enum(obj, pointer)
- obj = validator.validate_type(obj, pointer)
- obj = validator.validate_disallow(obj, pointer)
- obj = validator.validate_extends(obj, pointer)
-
- if validator.is_array(obj):
- obj = validator.validate_max_items(obj, pointer)
- obj = validator.validate_min_items(obj, pointer)
- obj = validator.validate_items(obj, pointer)
- obj = validator.validate_unique_items(obj, pointer)
-
- if validator.is_number(obj):
- obj = validator.validate_maximum(obj, pointer)
- obj = validator.validate_minimum(obj, pointer)
- obj = validator.validate_divisible_by(obj, pointer)
-
- if validator.is_object(obj):
- obj = validator.validate_dependencies(obj, pointer)
- obj = validator.validate_properties(obj, pointer)
-
- if validator.is_string(obj):
- obj = validator.validate_max_length(obj, pointer)
- obj = validator.validate_min_length(obj, pointer)
- obj = validator.validate_pattern(obj, pointer)
- obj = validator.validate_format(obj, pointer)
-
- if validator.errors:
- raise ValidationError('multiple errors',
- obj,
- errors=validator.errors)
-
- return obj
-
- def validate_dependencies(self, obj, pointer=None):
- if 'dependencies' in self.attrs:
- missings = set()
- for name, dependencies in self.attrs['dependencies'].items():
- if name not in obj:
- continue
- if isinstance(dependencies, Validator):
- obj[name] = dependencies(obj)
- elif isinstance(dependencies, sequence_types):
- for d in dependencies:
- if d not in obj:
- missings.add(d)
- elif dependencies not in obj:
- missings.add(dependencies)
- if missings:
- missings = sorted(missings)
- self.fail('Missing properties: {}'.format(', '.join(missings)), obj, pointer) # noqa
- return obj
-
- def validate_disallow(self, obj, pointer=None):
- if 'disallow' in self.attrs:
- disallows = self.attrs['disallow']
- if not isinstance(disallows, sequence_types):
- disallows = [disallows]
- disallowed = 0
- for type in disallows:
- try:
- if isinstance(type, Validator):
- type(obj)
- disallowed += 1
- elif type == "any":
- disallowed += 1
- elif type == "array" and self.is_array(obj):
- disallowed += 1
- elif type == "boolean" and self.is_boolean(obj):
- disallowed += 1
- elif type == "integer" and self.is_integer(obj):
- disallowed += 1
- elif type == "null" and self.is_null(obj):
- disallowed += 1
- elif type == "number" and self.is_number(obj):
- disallowed += 1
- elif type == "object" and self.is_object(obj):
- disallowed += 1
- elif type == "string" and self.is_string(obj):
- disallowed += 1
- except ValidationError:
- # let it pass; the object may match another allowed type
- pass
- if disallowed:
- self.fail('Wrong type', obj, pointer)
- return obj
-
- def validate_divisible_by(self, obj, pointer=None):
- if 'divisible_by' in self.attrs:
- factor = Decimal(str(self.attrs['divisible_by']))
- orig = Decimal(str(obj))
- if orig % factor != 0:
- self.fail('Not a multiple of {}'.format(factor), obj, pointer)
- return obj
-
- def validate_enum(self, obj, pointer=None):
- if 'enum' in self.attrs:
- if obj not in self.attrs['enum']:
- self.fail('Forbidden value', obj, pointer)
- return obj
-
- def validate_extends(self, obj, pointer=None):
- if 'extends' in self.attrs:
- extends = self.attrs['extends']
- if not isinstance(extends, sequence_types):
- extends = [extends]
- for type in extends:
- obj = type(obj)
- return obj
-
- def validate_format(self, obj, pointer=None):
- """
- ================= ============
- Expected draft03 Alias of
- ----------------- ------------
- color css.color
- date-time utc.datetime
- date utc.date
- time utc.time
- utc-millisec utc.millisec
- regex regex
- style css.style
- phone phone
- uri uri
- email email
- ip-address ipv4
- ipv6 ipv6
- host-name hostname
- ================= ============
-
- """
-
- if 'format' in self.attrs:
- substituted = {
- 'color': 'css.color',
- 'date-time': 'utc.datetime',
- 'date': 'utc.date',
- 'time': 'utc.time',
- 'utc-millisec': 'utc.millisec',
- 'regex': 'regex',
- 'style': 'css.style',
- 'phone': 'phone',
- 'uri': 'uri',
- 'email': 'email',
- 'ip-address': 'ipv4',
- 'ipv6': 'ipv6',
- 'host-name': 'hostname',
- }.get(self.attrs['format'], self.attrs['format'])
- logger.debug('use %s', substituted)
- return self.formats[substituted](obj)
- return obj
-
- def validate_items(self, obj, pointer=None):
- if 'items' in self.attrs:
- items = self.attrs['items']
- if isinstance(items, Validator):
- validator = items
- for index, element in enumerate(obj):
- with self.catch_fail():
- obj[index] = validator(element, pointer_join(pointer, index)) # noqa
- return obj
- elif isinstance(items, (list, tuple)):
- additionals = self.attrs['additional_items']
- validators = items
- for index, element in enumerate(obj):
- with self.catch_fail():
- try:
- validator = validators[index]
- except IndexError:
- if additionals is True:
- return obj
- elif additionals is False:
- self.fail('Additional elements are forbidden',
- obj,
- pointer_join(pointer, index))
- continue
- validator = additionals
- obj[index] = validator(element, pointer_join(pointer, index)) # noqa
- return obj
- else:
- raise NotImplementedError(items)
- return obj
-
- def validate_max_items(self, obj, pointer=None):
- if 'max_items' in self.attrs:
- count = len(obj)
- if count > self.attrs['max_items']:
- self.fail('Too many items', obj, pointer)
- return obj
-
- def validate_max_length(self, obj, pointer=None):
- if 'max_length' in self.attrs:
- length = len(obj)
- if length > self.attrs['max_length']:
- self.fail('Too long', obj, pointer)
- return obj
-
- def validate_maximum(self, obj, pointer=None):
- if 'maximum' in self.attrs:
- if obj > self.attrs['maximum']:
- self.fail('Too big number', obj, pointer)
- if self.attrs['exclusive_maximum'] and obj == self.attrs['maximum']: # noqa
- self.fail('Too big number', obj, pointer)
- return obj
-
- def validate_min_items(self, obj, pointer=None):
- if 'min_items' in self.attrs:
- count = len(obj)
- if count < self.attrs['min_items']:
- self.fail('Too few items', obj, pointer)
- return obj
-
- def validate_min_length(self, obj, pointer=None):
- if 'min_length' in self.attrs:
- length = len(obj)
- if length < self.attrs['min_length']:
- self.fail('Too short', obj, pointer)
- return obj
-
- def validate_minimum(self, obj, pointer=None):
- if 'minimum' in self.attrs:
- if obj < self.attrs['minimum']:
- self.fail('Too low number', obj, pointer)
- if self.attrs['exclusive_minimum'] and obj == self.attrs['minimum']: # noqa
- self.fail('Too low number',
- obj,
- pointer)
- return obj
-
- def validate_pattern(self, obj, pointer=None):
- if 'pattern' in self.attrs:
- regex = re.compile(self.attrs['pattern'])
- if not regex.search(obj):
- self.fail('Does not match pattern', obj, pointer)
- return obj
-
- def validate_properties(self, obj, pointer=None):
- validated = set()
- pending = set(obj.keys())
-
- for name, validator in self.attrs['properties'].items():
- if name in obj:
- with self.catch_fail():
- pending.discard(name)
- obj[name] = validator(obj[name], pointer_join(pointer, name)) # noqa
- validated.add(name)
- elif not validator.is_optional():
- self.fail('Required property', obj, pointer)
-
- for pattern, validator in self.attrs['pattern_properties'].items():
- regex = re.compile(pattern)
- for name, value in obj.items():
- if regex.search(name):
- with self.catch_fail():
- pending.discard(name)
- obj[name] = validator(obj[name], pointer_join(pointer, name)) # noqa
- validated.add(name)
-
- if not pending:
- return obj
-
- if self.attrs['additional_properties'] is True:
- return obj
-
- if self.attrs['additional_properties'] is False:
- if len(obj) > len(validated):
- self.fail('Additional properties are forbidden', obj, pointer) # noqa
- return obj
-
- validator = self.attrs['additional_properties']
- for name, value in obj.items():
- if name not in validated:
- obj[name] = validator(value, pointer_join(pointer, name))
- validated.add(name)
-
- return obj
-
- def validate_type(self, obj, pointer=None):
- if 'type' in self.attrs:
- types = self.attrs['type']
- if not isinstance(types, sequence_types):
- types = [types]
- for type in types:
- try:
- if isinstance(type, Validator):
- return type(obj)
- elif type == "any":
- return obj
- elif type == "array" and self.is_array(obj):
- return obj
- elif type == "boolean" and self.is_boolean(obj):
- return obj
- elif type == "integer" and self.is_integer(obj):
- return obj
- elif type == "null" and self.is_null(obj):
- return obj
- elif type == "number" and self.is_number(obj):
- return obj
- elif type == "object" and self.is_object(obj):
- return obj
- elif type == "string" and self.is_string(obj):
- return obj
- except ValidationError:
- # let it pass; the object may match another allowed type
- pass
- self.fail('Wrong type', obj, pointer)
- return obj
-
- def validate_unique_items(self, obj, pointer=None):
- if self.attrs.get('unique_items'):
- if len(obj) > len(set(json.dumps(element) for element in obj)):
- self.fail('Elements must be unique', obj, pointer)
- return obj
-
- def has_default(self):
- """docstring for has_default"""
- return False
-
- def is_optional(self):
- """
- True by default.
- """
- return not self.attrs.get('required', False)
-
- def fail(self, reason, obj, pointer=None):
- """
- Called when validation fails.
- """
- pointer = pointer_join(pointer)
- err = ValidationError(reason, obj, pointer)
- if self.fail_fast:
- raise err
- else:
- self.errors.append(err)
- return err
-
- def catch_fail(self):
- return FailCatcher(self)
-
-
-class FailCatcher(object):
- def __init__(self, validator):
- self.validator = validator
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, tb):
- if isinstance(value, ValidationError) and not self.validator.fail_fast:
- self.validator.errors.append(value)
- return True
- return False
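Review note: for anyone auditing this removal, the deleted Draft03Validator collects all failures before raising (it flips `fail_fast` off on a working copy and raises one aggregated ValidationError). A short usage sketch based on the class docstring above, assuming the jsonspec package is still importable at review time:

    from jsonspec.validators.draft03 import Draft03Validator
    from jsonspec.validators.exceptions import ValidationError

    validator = Draft03Validator({'type': 'string', 'min_length': 4})
    assert validator('this is sparta') == 'this is sparta'

    try:
        validator('abc')  # wrong length: min_length is 4
    except ValidationError as error:
        # every failed check is collected, not just the first
        print(sorted(err.args[0] for err in error.errors))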
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/draft04.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/draft04.py
deleted file mode 100644
index 6d2fc9c6..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/draft04.py
+++ /dev/null
@@ -1,681 +0,0 @@
-"""
- jsonspec.validators.draft04
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Implements JSON Schema draft04.
-"""
-
-from __future__ import absolute_import
-
-import logging
-import re
-from copy import deepcopy
-from decimal import Decimal
-from six import integer_types, string_types
-from six.moves.urllib.parse import urljoin
-from .bases import ReferenceValidator, Validator
-from .exceptions import CompilationError
-from .factorize import register
-from jsonspec.validators.exceptions import ValidationError
-from jsonspec.validators.util import uncamel
-from jsonspec.validators.pointer_util import pointer_join
-from jsonspec import driver as json
-
-__all__ = ['compile', 'Draft04Validator']
-
-sequence_types = (list, set, tuple)
-number_types = (integer_types, float, Decimal)
-logger = logging.getLogger(__name__)
-
-
-@register(spec='http://json-schema.org/draft-04/schema#')
-def compile(schema, pointer, context, scope=None):
- """
- Compiles schema with `JSON Schema`_ draft-04.
-
- :param schema: obj to compile
- :type schema: Mapping
- :param pointer: uri of the schema
- :type pointer: Pointer, str
- :param context: context of this schema
- :type context: Context
-
- .. _`JSON Schema`: http://json-schema.org
- """
-
- schm = deepcopy(schema)
-
- scope = urljoin(scope or str(pointer), schm.pop('id', None))
-
- if '$ref' in schema:
- return ReferenceValidator(urljoin(scope, schema['$ref']), context)
-
- attrs = {}
-
- if 'additionalItems' in schm:
- subpointer = pointer_join(pointer, 'additionalItems')
- attrs['additional_items'] = schm.pop('additionalItems')
- if isinstance(attrs['additional_items'], dict):
- compiled = compile(attrs['additional_items'],
- subpointer,
- context,
- scope)
- attrs['additional_items'] = compiled
- elif not isinstance(attrs['additional_items'], bool):
- raise CompilationError('wrong type for {}'.format('additional_items'), schema) # noqa
-
- if 'additionalProperties' in schm:
- subpointer = pointer_join(pointer, 'additionalProperties')
- attrs['additional_properties'] = schm.pop('additionalProperties')
- if isinstance(attrs['additional_properties'], dict):
- compiled = compile(attrs['additional_properties'],
- subpointer,
- context,
- scope)
- attrs['additional_properties'] = compiled
- elif not isinstance(attrs['additional_properties'], bool):
- raise CompilationError('wrong type for {}'.format('additional_properties'), schema) # noqa
-
- if 'allOf' in schm:
- subpointer = pointer_join(pointer, 'allOf')
- attrs['all_of'] = schm.pop('allOf')
- if isinstance(attrs['all_of'], (list, tuple)):
- attrs['all_of'] = [compile(element, subpointer, context, scope) for element in attrs['all_of']] # noqa
- else:
- # should be a boolean
- raise CompilationError('wrong type for {}'.format('allOf'), schema) # noqa
-
- if 'anyOf' in schm:
- subpointer = pointer_join(pointer, 'anyOf')
- attrs['any_of'] = schm.pop('anyOf')
- if isinstance(attrs['any_of'], (list, tuple)):
- attrs['any_of'] = [compile(element, subpointer, context, scope) for element in attrs['any_of']] # noqa
- else:
- # should be a boolean
- raise CompilationError('wrong type for {}'.format('anyOf'), schema) # noqa
-
- if 'default' in schm:
- attrs['default'] = schm.pop('default')
-
- if 'dependencies' in schm:
- attrs['dependencies'] = schm.pop('dependencies')
- if not isinstance(attrs['dependencies'], dict):
- raise CompilationError('dependencies must be an object', schema)
- for key, value in attrs['dependencies'].items():
- if isinstance(value, dict):
- subpointer = pointer_join(pointer, 'dependencies', key)
- attrs['dependencies'][key] = compile(value,
- subpointer,
- context,
- scope)
- elif not isinstance(value, sequence_types):
- raise CompilationError('dependencies must be an array or object', schema) # noqa
-
- if 'enum' in schm:
- attrs['enum'] = schm.pop('enum')
- if not isinstance(attrs['enum'], sequence_types):
- raise CompilationError('enum must be a sequence', schema)
-
- if 'exclusiveMaximum' in schm:
- attrs['exclusive_maximum'] = schm.pop('exclusiveMaximum')
- if not isinstance(attrs['exclusive_maximum'], bool):
- raise CompilationError('exclusiveMaximum must be a boolean', schema) # noqa
-
- if 'exclusiveMinimum' in schm:
- attrs['exclusive_minimum'] = schm.pop('exclusiveMinimum')
- if not isinstance(attrs['exclusive_minimum'], bool):
- raise CompilationError('exclusiveMinimum must be a boolean', schema) # noqa
-
- if 'format' in schm:
- attrs['format'] = schm.pop('format')
- if not isinstance(attrs['format'], string_types):
- raise CompilationError('format must be a string', schema)
-
- if 'items' in schm:
- subpointer = pointer_join(pointer, 'items')
- attrs['items'] = schm.pop('items')
- if isinstance(attrs['items'], (list, tuple)):
- # each value must be a json schema
- attrs['items'] = [compile(element, subpointer, context, scope) for element in attrs['items']] # noqa
- elif isinstance(attrs['items'], dict):
- # value must be a json schema
- attrs['items'] = compile(attrs['items'], subpointer, context, scope) # noqa
- else:
- # should be a boolean
- raise CompilationError('wrong type for {}'.format('items'), schema) # noqa
-
- if 'maximum' in schm:
- attrs['maximum'] = schm.pop('maximum')
- if not isinstance(attrs['maximum'], number_types):
- raise CompilationError('maximum must be a number', schema)
-
- if 'maxItems' in schm:
- attrs['max_items'] = schm.pop('maxItems')
- if not isinstance(attrs['max_items'], integer_types):
- raise CompilationError('maxItems must be integer', schema)
-
- if 'maxLength' in schm:
- attrs['max_length'] = schm.pop('maxLength')
- if not isinstance(attrs['max_length'], integer_types):
- raise CompilationError('maxLength must be integer', schema)
-
- if 'maxProperties' in schm:
- attrs['max_properties'] = schm.pop('maxProperties')
- if not isinstance(attrs['max_properties'], integer_types):
- raise CompilationError('maxProperties must be integer', schema)
-
- if 'minimum' in schm:
- attrs['minimum'] = schm.pop('minimum')
- if not isinstance(attrs['minimum'], number_types):
- raise CompilationError('minimum must be a number', schema)
-
- if 'minItems' in schm:
- attrs['min_items'] = schm.pop('minItems')
- if not isinstance(attrs['min_items'], integer_types):
- raise CompilationError('minItems must be integer', schema)
-
- if 'minLength' in schm:
- attrs['min_length'] = schm.pop('minLength')
- if not isinstance(attrs['min_length'], integer_types):
- raise CompilationError('minLength must be integer', schema)
-
- if 'minProperties' in schm:
- attrs['min_properties'] = schm.pop('minProperties')
- if not isinstance(attrs['min_properties'], integer_types):
- raise CompilationError('minProperties must be integer', schema)
-
- if 'multipleOf' in schm:
- attrs['multiple_of'] = schm.pop('multipleOf')
- if not isinstance(attrs['multiple_of'], number_types):
- raise CompilationError('multipleOf must be a number', schema)
-
- if 'not' in schm:
- attrs['not'] = schm.pop('not')
- if not isinstance(attrs['not'], dict):
- raise CompilationError('not must be an object', schema)
- subpointer = pointer_join(pointer, 'not')
- attrs['not'] = compile(attrs['not'], subpointer, context, scope)
-
- if 'oneOf' in schm:
- subpointer = pointer_join(pointer, 'oneOf')
- attrs['one_of'] = schm.pop('oneOf')
- if isinstance(attrs['one_of'], (list, tuple)):
- # each value must be a json schema
- attrs['one_of'] = [compile(element, subpointer, context, scope) for element in attrs['one_of']] # noqa
- else:
- # should be a boolean
- raise CompilationError('wrong type for {}'.format('oneOf'), schema)
-
- if 'pattern' in schm:
- attrs['pattern'] = schm.pop('pattern')
- if not isinstance(attrs['pattern'], string_types):
- raise CompilationError('pattern must be a string', schema)
-
- if 'properties' in schm:
- attrs['properties'] = schm.pop('properties')
- if not isinstance(attrs['properties'], dict):
- raise CompilationError('properties must be an object', schema)
- for subname, subschema in attrs['properties'].items():
- subpointer = pointer_join(pointer, subname)
- compiled = compile(subschema, subpointer, context, scope)
- attrs['properties'][subname] = compiled
-
- if 'patternProperties' in schm:
- attrs['pattern_properties'] = schm.pop('patternProperties')
- if not isinstance(attrs['pattern_properties'], dict):
- raise CompilationError('patternProperties must be an object', schema) # noqa
- for subname, subschema in attrs['pattern_properties'].items():
- subpointer = pointer_join(pointer, 'patternProperties', subname)
- compiled = compile(subschema, subpointer, context, scope)
- attrs['pattern_properties'][subname] = compiled
-
- if 'required' in schm:
- attrs['required'] = schm.pop('required')
- if not isinstance(attrs['required'], list):
- raise CompilationError('required must be a list', schema)
- if len(attrs['required']) < 1:
- raise CompilationError('required cannot be empty', schema)
-
- if 'type' in schm:
- attrs['type'] = schm.pop('type')
- if isinstance(attrs['type'], string_types):
- attrs['type'] = [attrs['type']]
- elif not isinstance(attrs['type'], sequence_types):
- raise CompilationError('type must be string or sequence', schema)
-
- if 'uniqueItems' in schm:
- attrs['unique_items'] = schm.pop('uniqueItems')
- if not isinstance(attrs['unique_items'], bool):
- raise CompilationError('uniqueItems must be a boolean', schema)
-
- return Draft04Validator(attrs, str(pointer), context.formats)
-
-
-class Draft04Validator(Validator):
- """
- Implements `JSON Schema`_ draft-04 validation.
-
- :ivar attrs: attributes to validate against
- :ivar uri: uri of the current validator
- :ivar formats: mapping of available formats
-
- >>> validator = Draft04Validator({'min_length': 4})
- >>> assert validator('this is sparta')
-
- .. _`JSON Schema`: http://json-schema.org
- """
-
- def __init__(self, attrs, uri=None, formats=None):
- attrs = {uncamel(k): v for k, v in attrs.items()}
-
- self.formats = formats or {}
- self.attrs = attrs
- self.attrs.setdefault('additional_items', True)
- self.attrs.setdefault('additional_properties', True)
- self.attrs.setdefault('exclusive_maximum', False)
- self.attrs.setdefault('exclusive_minimum', False)
- self.attrs.setdefault('pattern_properties', {})
- self.attrs.setdefault('properties', {})
- self.uri = uri
- self.default = self.attrs.get('default', None)
- self.fail_fast = True
- self.errors = []
-
- def validate(self, obj, pointer=None):
- """
- Validate object against validator
-
- :param obj: the object to validate
- """
-
- pointer = pointer or '#'
-
- validator = deepcopy(self)
- validator.errors = []
- validator.fail_fast = False
-
- obj = deepcopy(obj)
- obj = validator.validate_enum(obj, pointer)
- obj = validator.validate_type(obj, pointer)
- obj = validator.validate_not(obj, pointer)
- obj = validator.validate_all_of(obj, pointer)
- obj = validator.validate_any_of(obj, pointer)
- obj = validator.validate_one_of(obj, pointer)
-
- if self.is_array(obj):
- obj = validator.validate_items(obj, pointer)
- obj = validator.validate_max_items(obj, pointer)
- obj = validator.validate_min_items(obj, pointer)
- obj = validator.validate_unique_items(obj, pointer)
- elif self.is_number(obj):
- obj = validator.validate_maximum(obj, pointer)
- obj = validator.validate_minimum(obj, pointer)
- obj = validator.validate_multiple_of(obj, pointer)
- elif self.is_object(obj):
- obj = validator.validate_required(obj, pointer)
- obj = validator.validate_max_properties(obj, pointer)
- obj = validator.validate_min_properties(obj, pointer)
- obj = validator.validate_dependencies(obj, pointer)
- obj = validator.validate_properties(obj, pointer)
- obj = validator.validate_default_properties(obj, pointer)
- elif self.is_string(obj):
- obj = validator.validate_max_length(obj, pointer)
- obj = validator.validate_min_length(obj, pointer)
- obj = validator.validate_pattern(obj, pointer)
- obj = validator.validate_format(obj, pointer)
-
- if validator.errors:
- raise ValidationError('multiple errors',
- obj,
- errors=validator.errors)
-
- return obj
-
- def is_array(self, obj):
- return isinstance(obj, sequence_types)
-
- def is_boolean(self, obj):
- return isinstance(obj, bool)
-
- def is_integer(self, obj):
- return isinstance(obj, integer_types) and not isinstance(obj, bool)
-
- def is_number(self, obj):
- return isinstance(obj, number_types) and not isinstance(obj, bool)
-
- def is_object(self, obj):
- return isinstance(obj, dict)
-
- def is_string(self, obj):
- return isinstance(obj, string_types)
-
- def has_default(self):
- return 'default' in self.attrs
-
- def validate_all_of(self, obj, pointer=None):
- for validator in self.attrs.get('all_of', []):
- obj = validator(obj)
- return obj
-
- def validate_any_of(self, obj, pointer=None):
- if 'any_of' in self.attrs:
- for validator in self.attrs['any_of']:
- try:
- obj = validator(obj)
- return obj
- except ValidationError:
- pass
- self.fail('Not in any_of', obj, pointer)
- return obj
-
- def validate_default_properties(self, obj, pointer=None):
- # Reinject defaults from properties.
- for name, validator in self.attrs.get('properties', {}).items():
- if name not in obj and validator.has_default():
- obj[name] = deepcopy(validator.default)
- return obj
-
- def validate_dependencies(self, obj, pointer=None):
- for key, dependencies in self.attrs.get('dependencies', {}).items():
- if key in obj:
- if isinstance(dependencies, sequence_types):
- for name in set(dependencies) - set(obj.keys()):
- self.fail('Missing property', obj, pointer_join(pointer, name)) # noqa
- else:
- dependencies(obj)
- return obj
-
- def validate_enum(self, obj, pointer=None):
- if 'enum' in self.attrs:
- if obj not in self.attrs['enum']:
- self.fail('Forbidden value', obj, pointer)
- return obj
-
- def validate_format(self, obj, pointer=None):
- """
- ================= ============
- Expected draft04 Alias of
- ----------------- ------------
- date-time rfc3339.datetime
- email email
- hostname hostname
- ipv4 ipv4
- ipv6 ipv6
- uri uri
- ================= ============
-
- """
- if 'format' in self.attrs:
- substituted = {
- 'date-time': 'rfc3339.datetime',
- 'email': 'email',
- 'hostname': 'hostname',
- 'ipv4': 'ipv4',
- 'ipv6': 'ipv6',
- 'uri': 'uri',
- }.get(self.attrs['format'], self.attrs['format'])
- logger.debug('use %s', substituted)
- try:
- return self.formats[substituted](obj)
- except ValidationError as error:
- logger.error(error)
- self.fail('Forbidden value', obj, pointer)
- return obj
-
- def validate_items(self, obj, pointer=None):
- if 'items' in self.attrs:
- items = self.attrs['items']
- if isinstance(items, Validator):
- validator = items
- for index, element in enumerate(obj):
- with self.catch_fail():
- obj[index] = validator(element, pointer_join(pointer, index)) # noqa
- return obj
- elif isinstance(items, (list, tuple)):
- additionals = self.attrs['additional_items']
- validators = items
-
- validated = list(obj)
- for index, element in enumerate(validated):
- with self.catch_fail():
- try:
- validator = validators[index]
- except IndexError:
- if additionals is True:
- return obj
- elif additionals is False:
- self.fail('Forbidden value',
- obj,
- pointer=pointer_join(self.uri, index)) # noqa
- continue
- validator = additionals
- validated[index] = \
- validator(element, pointer_join(pointer, index)) # noqa
- obj = obj.__class__(validated)
- return obj
- else:
- raise NotImplementedError(items)
- return obj
-
- def validate_maximum(self, obj, pointer=None):
- if 'maximum' in self.attrs:
- m = self.attrs['maximum']
- if obj < m:
- return obj
- exclusive = self.attrs['exclusive_maximum']
- if not exclusive and (obj == m):
- return obj
- self.fail('Exceeded maximum', obj, pointer)
- return obj
-
- def validate_max_items(self, obj, pointer=None):
- if 'max_items' in self.attrs:
- count = len(obj)
- if count > self.attrs['max_items']:
- self.fail('Too many elements', obj, pointer)
- return obj
-
- def validate_max_length(self, obj, pointer=None):
- if 'max_length' in self.attrs:
- length = len(obj)
- if length > self.attrs['max_length']:
- self.fail('Too long', obj, pointer)
- return obj
-
- def validate_max_properties(self, obj, pointer=None):
- if 'max_properties' in self.attrs:
- count = len(obj)
- if count > self.attrs['max_properties']:
- self.fail('Too many properties', obj, pointer)
- return obj
-
- def validate_minimum(self, obj, pointer=None):
- if 'minimum' in self.attrs:
- m = self.attrs['minimum']
- if obj > m:
- return obj
- exclusive = self.attrs['exclusive_minimum']
- if not exclusive and (obj == m):
- return obj
- self.fail('Too small', obj, pointer)
- return obj
-
- def validate_min_items(self, obj, pointer=None):
- if 'min_items' in self.attrs:
- count = len(obj)
- if count < self.attrs['min_items']:
- self.fail('Too few elements', obj, pointer)
- return obj
-
- def validate_min_length(self, obj, pointer=None):
- if 'min_length' in self.attrs:
- length = len(obj)
- if length < self.attrs['min_length']:
- self.fail('Too short', obj, pointer)
- return obj
-
- def validate_min_properties(self, obj, pointer=None):
- if 'min_properties' in self.attrs:
- count = len(obj)
- if count < self.attrs['min_properties']:
- self.fail('Too few properties', obj, pointer)
- return obj
-
- def validate_multiple_of(self, obj, pointer=None):
- if 'multiple_of' in self.attrs:
- factor = Decimal(str(self.attrs['multiple_of']))
- orig = Decimal(str(obj))
- if orig % factor != 0:
- self.fail('Forbidden value', obj, pointer)
- return obj
-
- def validate_not(self, obj, pointer=None):
- if 'not' in self.attrs:
- try:
- validator = self.attrs['not']
- validator(obj)
- except ValidationError:
- return obj
- else:
- self.fail('Forbidden value', obj, pointer)
- return obj
-
- def validate_one_of(self, obj, pointer=None):
- if 'one_of' in self.attrs:
- validated = 0
- for validator in self.attrs['one_of']:
- try:
- validated_obj = validator(obj)
- validated += 1
- except ValidationError:
- pass
- if not validated:
- self.fail('Validates none', obj)
- elif validated == 1:
- return validated_obj
- else:
- self.fail('Validates more than once', obj)
- return obj
-
- def validate_pattern(self, obj, pointer=None):
- if 'pattern' in self.attrs:
- pattern = self.attrs['pattern']
- if re.search(pattern, obj):
- return obj
- self.fail('Forbidden value', obj, pointer)
- return obj
-
- def validate_properties(self, obj, pointer=None):
- validated = set()
- pending = set(obj.keys())
- response = {}
-
- if not obj:
- return response
-
- for name, validator in self.attrs['properties'].items():
- if name in obj:
- with self.catch_fail():
- pending.discard(name)
- obj[name] = validator(obj[name], pointer_join(pointer, name)) # noqa
- validated.add(name)
-
- for pattern, validator in self.attrs['pattern_properties'].items():
- for name in sorted(obj.keys()):
- if re.search(pattern, name):
- with self.catch_fail():
- pending.discard(name)
- obj[name] = validator(obj[name], pointer_join(pointer, name)) # noqa
- validated.add(name)
-
- if not pending:
- return obj
-
- additionals = self.attrs['additional_properties']
- if additionals is True:
- return obj
-
- if additionals is False:
- for name in pending:
- self.fail('Forbidden property', obj, pointer_join(pointer, name)) # noqa
- return obj
-
- validator = additionals
- for name in sorted(pending):
- obj[name] = validator(obj.pop(name), pointer_join(pointer, name)) # noqa
- validated.add(name)
- return obj
-
- def validate_required(self, obj, pointer=None):
- if 'required' in self.attrs:
- for name in self.attrs['required']:
- if name not in obj:
- self.fail('Missing property', obj, pointer_join(pointer, name)) # noqa
- return obj
-
- def validate_type(self, obj, pointer=None):
- if 'type' in self.attrs:
- types = self.attrs['type']
- if isinstance(types, string_types):
- types = [types]
-
- for t in types:
- if t == 'array' and self.is_array(obj):
- return obj
- if t == 'boolean' and self.is_boolean(obj):
- return obj
- if t == 'integer' and self.is_integer(obj):
- return obj
- if t == 'number' and self.is_number(obj):
- return obj
- if t == 'null' and obj is None:
- return obj
- if t == 'object' and self.is_object(obj):
- return obj
- if t == 'string' and self.is_string(obj):
- return obj
-
- self.fail('Wrong type', obj, pointer)
- return obj
-
- def validate_unique_items(self, obj, pointer=None):
- if self.attrs.get('unique_items'):
- if len(obj) > len(set(json.dumps(element) for element in obj)):
- self.fail('Elements must be unique', obj, pointer)
- return obj
-
- def is_optional(self):
- """
- Returns True, because it is meaningless in draft-04.
- """
- logger.warn('asking for is_optional')
- return True
-
- def fail(self, reason, obj, pointer=None):
- """
- Called when validation fails.
- """
- pointer = pointer_join(pointer)
- err = ValidationError(reason, obj, pointer)
- if self.fail_fast:
- raise err
- else:
- self.errors.append(err)
- return err
-
- def catch_fail(self):
- return FailCatcher(self)
-
-
-class FailCatcher(object):
- def __init__(self, validator):
- self.validator = validator
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, tb):
- if isinstance(value, ValidationError) and not self.validator.fail_fast:
- self.validator.errors.append(value)
- return True
- return False
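Review note: the draft-04 removal mirrors draft-03; the validator aggregates errors the same way and exposes them through `ValidationError.flatten()`. A sketch under the same assumption that jsonspec remains importable:

    from jsonspec.validators.draft04 import Draft04Validator
    from jsonspec.validators.exceptions import ValidationError

    validator = Draft04Validator({'min_length': 4, 'pattern': 'sparta'})
    assert validator('this is sparta')

    try:
        validator('oop')  # violates both min_length and pattern
    except ValidationError as error:
        for pointer, reasons in error.flatten().items():
            print(pointer, sorted(reasons))  # e.g. '#/' with two reasons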
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/exceptions.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/exceptions.py
deleted file mode 100644
index 9b4ac4ed..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/exceptions.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""
- jsonspec.validators.exceptions
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-from __future__ import absolute_import
-
-__all__ = ['CompilationError', 'ReferenceError', 'ValidationError']
-
-from collections import defaultdict
-
-
-class CompilationError(Exception):
- """Raised while schema parsing"""
- def __init__(self, message, schema):
- super(CompilationError, self).__init__(message, schema)
- self.schema = schema
-
-
-class ReferenceError(Exception):
- """Raised while reference error"""
- def __init__(self, *args):
- super(ReferenceError, self).__init__(*args)
-
-
-class ValidationError(ValueError):
- """Raised when validation fails"""
- def __init__(self, reason, obj=None, pointer=None, errors=None):
- """
- :param reason: the reason for failing
- :param obj: the object that failed
- :param errors: sub errors, if they exist
- """
- super(ValidationError, self).__init__(reason, obj)
- self.obj = obj
- self.pointer = pointer
-
- self.errors = set()
- if isinstance(errors, (list, tuple, set)):
- self.errors.update(errors)
- elif isinstance(errors, Exception):
- self.errors.add(errors)
-
- def flatten(self):
- """
- Flatten nested errors.
-
- {pointer: reasons}
- """
- return flatten(self)
-
-
-def flatten(error):
- def iter_it(src):
- if isinstance(src, (list, set, tuple)):
- for error in src:
- for pointer, reason in iter_it(error):
- yield pointer, reason
- if isinstance(src, ValidationError):
- if src.errors:
- for pointer, reason in iter_it(src.errors):
- yield pointer, reason
- if src.pointer:
- yield src.pointer, src.args[0]
-
- data = defaultdict(set)
- for pointer, reason in iter_it(error):
- data[pointer].add(reason)
- return dict(data)
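Review note: `flatten` on the deleted ValidationError turns a tree of sub-errors into a `{pointer: reasons}` mapping, which is the shape both draft validators raise. A self-contained illustration using only this module's API:

    from jsonspec.validators.exceptions import ValidationError

    child_a = ValidationError('Too short', 'ab', pointer='#/name')
    child_b = ValidationError('Missing property', {}, pointer='#/age')
    parent = ValidationError('multiple errors', {}, errors=[child_a, child_b])

    assert parent.flatten() == {
        '#/name': {'Too short'},
        '#/age': {'Missing property'},
    }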
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/factorize.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/factorize.py
deleted file mode 100644
index 8dafcdbf..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/factorize.py
+++ /dev/null
@@ -1,131 +0,0 @@
-"""
- jsonspec.validators.factorize
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-import logging
-from functools import partial
-from jsonspec.pointer import DocumentPointer
-from jsonspec.pointer.exceptions import ExtractError
-from jsonspec.reference import LocalRegistry
-from .exceptions import CompilationError
-from .formats import FormatRegistry
-
-__all__ = ['Context', 'Factory', 'register']
-
-logger = logging.getLogger(__name__)
-
-
-class Context(object):
- """
-
- :ivar factory: global factory
- :ivar registry: the current registry
- :ivar spec: the current spec
- :ivar formats: the current formats exposed
- """
- def __init__(self, factory, registry, spec=None, formats=None):
- self.factory = factory
- self.registry = registry
- self.spec = spec
- self.formats = formats
-
- def __call__(self, schema, pointer):
- return self.factory(schema, pointer, self.spec)
-
- def resolve(self, pointer):
- try:
- dp = DocumentPointer(pointer)
- if dp.is_inner():
- logger.debug('resolve inner %s', pointer)
- return self.factory.local(self.registry.resolve(pointer),
- pointer,
- self.registry,
- self.spec)
-
- logger.debug('resolve outside %s', pointer)
- return self.factory(self.registry.resolve(pointer),
- pointer,
- self.spec)
- except ExtractError as error:
- raise CompilationError(str(error), {})
-
-
-class Factory(object):
- """
-
- :ivar provider: global registry
- :ivar spec: default spec
- """
-
- spec = 'http://json-schema.org/draft-04/schema#'
- compilers = {}
-
- def __init__(self, provider=None, spec=None, formats=None):
- self.provider = provider or {}
- self.spec = spec or self.spec
- if not isinstance(formats, FormatRegistry):
- formats = FormatRegistry(formats)
- self.formats = formats
-
- def __call__(self, schema, pointer, spec=None):
- try:
- spec = schema.get('$schema', spec or self.spec)
- compiler = self.compilers[spec]
- except KeyError:
- raise CompilationError('{!r} not registered'.format(spec), schema)
-
- registry = LocalRegistry(schema, self.provider)
- local = DocumentPointer(pointer)
-
- if local.document:
- registry[local.document] = schema
- local.document = ''
- context = Context(self, registry, spec, self.formats)
- return compiler(schema, pointer, context)
-
- def local(self, schema, pointer, registry, spec=None):
- try:
- spec = schema.get('$schema', spec or self.spec)
- compiler = self.compilers[spec]
- except KeyError:
- raise CompilationError('{!r} not registered'.format(spec), schema)
-
- context = Context(self, registry, spec, self.formats)
- return compiler(schema, pointer, context)
-
- @classmethod
- def register(cls, spec, compiler):
- cls.compilers[spec] = compiler
- return compiler
-
-
-def register(compiler=None, spec=None):
- """
- Expose compiler to factory.
-
- :param compiler: the callable to expose
- :type compiler: callable
- :param spec: name of the spec
- :type spec: str
-
- It can be used as a decorator::
-
- @register(spec='my:first:spec')
- def my_compiler(schema, pointer, context):
- return Validator(schema)
-
- or as a function::
-
- def my_compiler(schema, pointer, context):
- return Validator(schema)
-
- register(my_compiler, 'my:second:spec')
-
- """
- if not spec:
- raise CompilationError('Spec is required', None)
- if not compiler:
- return partial(register, spec=spec)
- return Factory.register(spec, compiler)
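Review note: the factorize module being dropped is the dispatch point that maps a `$schema` URI to a compiler. Wiring the docstring's own decorator example into the Factory as a whole, as a sketch (it assumes jsonspec's pointer and reference machinery is importable):

    from jsonspec.validators.factorize import Factory, register

    @register(spec='my:first:spec')
    def my_compiler(schema, pointer, context):
        # a compiler returns a validator callable; trivial pass-through here
        return lambda obj, pointer=None: obj

    factory = Factory()
    validator = factory({'$schema': 'my:first:spec'}, '#')
    assert validator({'anything': 'goes'})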
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/formats.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/formats.py
deleted file mode 100644
index f5ecdc76..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/formats.py
+++ /dev/null
@@ -1,143 +0,0 @@
-"""
- jsonspec.validators.formats
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-from __future__ import absolute_import
-
-import logging
-from functools import partial
-from pkg_resources import iter_entry_points, DistributionNotFound
-from .exceptions import CompilationError
-
-__all__ = ['register', 'FormatRegistry']
-
-logger = logging.getLogger(__name__)
-
-
-class FormatRegistry(object):
- """
- Declare callables that must validate strings.
-
- Callables can be injected in several ways:
-
- - Using the instance register method:
-
- .. code-block:: python
-
- registry = FormatRegistry()
- registry.register('foo', bar)
-
- - Using the class register method:
-
- .. code-block:: python
-
- FormatRegistry.register('foo', bar)
- registry = FormatRegistry()
- assert 'foo' in registry
-
- - Every callable declared in the setuptools ``entry_points``
- namespace is loaded automatically.
-
- For example, with this setup.cfg:
-
- .. code-block:: ini
-
- [entry_points]
- jsonspec.validators.formats =
- date-time = jsonspec.validators.util:validate_datetime
- email = jsonspec.validators.util:validate_email
- hostname = jsonspec.validators.util:validate_hostname
- ipv4 = jsonspec.validators.util:validate_ipv4
- ipv6 = jsonspec.validators.util:validate_ipv6
- uri = jsonspec.validators.util:validate_uri
-
- .. code-block:: python
-
- registry = FormatRegistry()
- assert 'date-time' in registry
-
- """
-
- namespace = 'jsonspec.validators.formats'
- custom = {}
-
- def __init__(self, data=None, namespace=None):
- self.custom = data or self.custom
- self.loaded = {}
- self.fallback = {}
- self.namespace = namespace or self.namespace
-
- def __getitem__(self, name):
- if name in self.custom:
- return self.custom[name]
- if name in self.loaded:
- return self.loaded[name]
- if name in self.fallback:
- return self.fallback[name]
- return self.load(name)
-
- def __contains__(self, name):
- return name in self.custom or name in self.loaded
-
- def load(self, name):
- error = None
-
- for entrypoint in iter_entry_points(self.namespace):
- try:
- if entrypoint.name == name:
- self.loaded[name] = entrypoint.load()
- return self.loaded[name]
- except DistributionNotFound as exc:
- # Python 3 unbinds ``exc`` when the except block ends,
- # so keep a reference for the reporting below
- error = exc
-
- if error:
- logger.warn('Unable to load %s: %s is missing', name, error)
- else:
- logger.warn('%s is not defined', name)
-
- def fallback(obj):
- logger.info('Unable to validate %s: %s is missing', name, error)
- return obj
- fallback.__doc__ = 'fallback for {!r} validation'.format(name)
- self.fallback[name] = fallback
- return self.fallback[name]
-
- @classmethod
- def register(cls, name, func):
- cls.custom[name] = func
-
-
-def register(func=None, name=None):
- """
- Expose a format validator to the registry.
-
- :param func: the callable to expose
- :type func: callable
- :param name: name of format
- :type name: str
-
- It can be used as a decorator::
-
- @register(name='my:validator')
- def my_validator(obj):
- if obj is True:
- return obj
- raise ValidationError('obj is not true')
-
- or as a function::
-
- def my_validator(obj):
- if obj is True:
- return obj
- raise ValidationError('obj is not true')
-
- register(my_validator, name='my:validator')
-
- """
- if not name:
- raise CompilationError('Name is required', None)
- if not func:
- return partial(register, name=name)
- return FormatRegistry.register(name, func)
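Review note: FormatRegistry, also removed here, is what `Draft03Validator.formats` and `Draft04Validator.formats` index into. Its docstring examples combined into one runnable sketch:

    from jsonspec.validators.exceptions import ValidationError
    from jsonspec.validators.formats import FormatRegistry

    def validate_true(obj):
        if obj is True:
            return obj
        raise ValidationError('obj is not true')

    registry = FormatRegistry({'my:validator': validate_true})
    assert 'my:validator' in registry
    assert registry['my:validator'](True) is True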
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/pointer_util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/pointer_util.py
deleted file mode 100644
index f7d002df..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/pointer_util.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
- jsonspec.validators.pointer_util
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-from __future__ import absolute_import
-
-
-def pointer_join(pre, *parts):
- resp = str(pre or '#')
- if resp == '#/':
- resp = '#'
- for part in parts:
- if not resp.endswith('/'):
- resp += '/'
- resp += str(part)
- if resp == '#':
- resp = '#/'
- return resp
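Review note: with the assignment fix above (`resp = '#'` instead of the no-op comparison), `pointer_join` normalizes '#' and '#/' consistently. Expected behaviour, as quick checks:

    from jsonspec.validators.pointer_util import pointer_join

    assert pointer_join(None) == '#/'
    assert pointer_join('#', 'properties', 'name') == '#/properties/name'
    assert pointer_join('#/', 'items', 0) == '#/items/0'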
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/util.py
deleted file mode 100644
index fb19ed9e..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/jsonspec/validators/util.py
+++ /dev/null
@@ -1,243 +0,0 @@
-"""
- jsonspec.validators.util
- ~~~~~~~~~~~~~~~~~~~~~~~~
-
-"""
-
-from __future__ import absolute_import
-
-import logging
-import re
-import time
-from copy import deepcopy
-from decimal import Decimal
-from datetime import tzinfo, timedelta, datetime, date
-from six import text_type
-from six import integer_types
-from six.moves.urllib.parse import urlparse
-from .exceptions import ValidationError
-
-__all__ = []
-
-number_types = (integer_types, float, Decimal)
-
-logger = logging.getLogger(__name__)
-
-HOSTNAME_TOKENS = re.compile('(?!-)[a-z\d-]{1,63}(?<!-)$', re.IGNORECASE)
-HOSTNAME_LAST_TOKEN = re.compile('[a-z]+', re.IGNORECASE)
-EMAIL = re.compile('[^@]+@[^@]+')
-CSS_COLORS = set(['aqua', 'black', 'blue', 'fuchsia', 'gray', 'green',
- 'lime', 'maroon', 'navy', 'olive', 'orange', 'purple',
- 'red', 'silver', 'teal', 'white', 'yellow'])
-
-
-def uncamel(name):
- """converts camelcase to underscore
- >>> uncamel('fooBar')
- 'foo_bar'
- >>> uncamel('FooBar')
- 'foo_bar'
- >>> uncamel('_fooBar')
- '_foo_bar'
- >>> uncamel('_FooBar')
- '__foo_bar'
- """
- response, name = name[0].lower(), name[1:]
- for n in name:
- if n.isupper():
- response += '_' + n.lower()
- else:
- response += n
- return response
-
-
-class offset(tzinfo):
- def __init__(self, value):
- self.value = value
-
- def utcoffset(self, dt):
- hours, minutes = self.value.split(':', 1)
- return timedelta(hours=int(hours), minutes=int(minutes))
-
- def tzname(self, dt):
- return '{}'.format(self.value)
-
-
-def rfc3339_to_datetime(data):
- """convert a rfc3339 date representation into a Python datetime"""
- try:
- ts = time.strptime(data, '%Y-%m-%d')
- return date(*ts[:3])
- except ValueError:
- pass
-
- try:
- dt, _, tz = data.partition('Z')
- if tz:
- tz = offset(tz)
- else:
- tz = offset('00:00')
- if '.' in dt and dt.rsplit('.', 1)[-1].isdigit():
- ts = time.strptime(dt, '%Y-%m-%dT%H:%M:%S.%f')
- else:
- ts = time.strptime(dt, '%Y-%m-%dT%H:%M:%S')
- return datetime(*ts[:6], tzinfo=tz)
- except ValueError:
- raise ValueError('date-time {!r} is not a valid rfc3339 date representation'.format(data)) # noqa
-
-
-def validate_css_color(obj):
- color = obj.lower()
- if len(color) == 7 and re.match('^#[0-9a-f]{6}$', color):
- return obj
- elif len(color) == 4 and re.match('^#[0-9a-f]{3}$', color):
- return obj
- elif color not in CSS_COLORS:
- raise ValidationError('Not a css color {!r}'.format(obj))
- return obj
-
-
-def validate_rfc3339_datetime(obj):
- try:
- rfc3339_to_datetime(obj)
- except ValueError:
- raise ValidationError('{!r} is not a valid datetime', obj)
- return obj
-
-
-def validate_utc_datetime(obj):
- if not obj.endswith('Z'):
- raise ValidationError('{!r} is not a valid datetime', obj)
- obj = obj[:-1]
- if '.' in obj:
- obj, milli = obj.split('.', 1)
- if not milli.isdigit():
- raise ValidationError('{!r} is not a valid datetime', obj)
-
- try:
- time.strptime(obj, '%Y-%m-%dT%H:%M:%S')
- except ValueError:
- raise ValidationError('{!r} is not a valid datetime', obj)
- return obj
-
-
-def validate_utc_date(obj):
- try:
- time.strptime(obj, '%Y-%m-%d')
- except (TypeError, ValueError):
- raise ValidationError('{!r} is not a valid date', obj)
- return obj
-
-
-def validate_utc_time(obj):
- try:
- time.strptime(obj, '%H:%M:%S')
- except (TypeError, ValueError):
- raise ValidationError('{!r} is not a valid time', obj)
- return obj
-
-
-def validate_utc_millisec(obj):
- try:
- if not isinstance(obj, number_types):
- raise TypeError
- datetime.utcfromtimestamp(obj / 1000)
- except (TypeError, ValueError):
- raise ValidationError('{!r} is not a valid utc millis', obj)
- return obj
-
-
-def validate_email(obj):
- if not EMAIL.match(obj):
- raise ValidationError('{!r} is not a valid email'.format(obj))
- return obj
-
-
-def validate_hostname(obj):
- try:
- host = deepcopy(obj)
- if len(host) > 255:
- raise ValueError
- if host[-1] == '.':
- host = host[:-1]
- tokens = host.split('.')
- if not all(HOSTNAME_TOKENS.match(x) for x in tokens):
- raise ValueError
- if not HOSTNAME_LAST_TOKEN.search(tokens[-1]):
- raise ValueError
- except ValueError:
- raise ValidationError('{!r} is not a valid hostname'.format(obj))
- return obj
-
-
-def validate_ipv4(obj):
- try:
- import ipaddress
- obj = text_type(obj)
- ipaddress.IPv4Address(obj)
- except ImportError:
- raise ValidationError('IPv4 relies on ipaddress package', obj)
- except (ipaddress.AddressValueError, ipaddress.NetmaskValueError):
- raise ValidationError('{!r} does not appear to '
- 'be an IPv4 address'.format(obj))
- return obj
-
-
-def validate_ipv6(obj):
- try:
- import ipaddress
- obj = text_type(obj)
- ipaddress.IPv6Address(obj)
- except ImportError:
- raise ValidationError('IPv6 relies on ipaddress package', obj)
- except (ipaddress.AddressValueError, ipaddress.NetmaskValueError):
- raise ValidationError('{!r} does not appear to '
- 'be an IPv6 address'.format(obj))
- return obj
-
-
-def validate_regex(obj):
- # TODO implement ECMA 262 regex
- import re
- try:
- re.compile(obj)
- except re.error:
- raise ValidationError('Not a regex', obj)
- return obj
-
-
-def validate_uri(obj):
- try:
- if ':' not in obj:
- raise ValueError('missing scheme')
- urlparse(obj)
- except Exception as error:
- logger.exception(error)
- raise ValidationError('{!r} is not a valid uri'.format(obj))
- return obj
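Review note: two helpers from the deleted util module are used throughout the validators: `uncamel` (schema keyword to attrs key) and `rfc3339_to_datetime` (behind the `date-time` format). Quick sanity checks, again assuming jsonspec is importable:

    from jsonspec.validators.util import rfc3339_to_datetime, uncamel

    assert uncamel('additionalProperties') == 'additional_properties'

    dt = rfc3339_to_datetime('2014-01-02T03:04:05Z')
    assert (dt.year, dt.hour, dt.minute) == (2014, 3, 4)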
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/__main__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/__main__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/btm_matcher.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/btm_matcher.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/btm_utils.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/btm_utils.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixer_base.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixer_base.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixer_util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixer_util.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_apply.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_apply.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_asserts.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_asserts.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_basestring.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_basestring.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_buffer.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_buffer.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_dict.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_dict.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_except.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_except.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_exec.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_exec.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_execfile.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_execfile.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_exitfunc.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_exitfunc.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_filter.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_filter.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_funcattrs.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_funcattrs.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_future.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_future.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_getcwdu.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_getcwdu.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_has_key.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_has_key.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_idioms.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_idioms.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_import.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_import.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_imports.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_imports.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_imports2.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_imports2.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_input.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_input.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_intern.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_intern.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_isinstance.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_isinstance.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_itertools.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_itertools.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_itertools_imports.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_itertools_imports.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_long.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_long.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_map.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_map.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_metaclass.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_metaclass.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_methodattrs.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_methodattrs.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_ne.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_ne.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_next.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_next.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_nonzero.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_nonzero.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_numliterals.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_numliterals.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_operator.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_operator.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_paren.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_paren.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_print.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_print.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_raise.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_raise.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_raw_input.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_raw_input.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_reduce.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_reduce.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_reload.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_reload.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_renames.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_renames.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_repr.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_repr.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_set_literal.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_set_literal.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_standarderror.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_standarderror.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_sys_exc.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_sys_exc.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_throw.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_throw.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_tuple_params.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_tuple_params.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_types.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_types.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_unicode.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_unicode.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_urllib.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_urllib.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_ws_comma.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_ws_comma.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_xrange.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_xrange.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_xreadlines.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_xreadlines.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_zip.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/fixes/fix_zip.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/main.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/main.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/patcomp.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/patcomp.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/conv.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/conv.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/driver.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/driver.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/grammar.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/grammar.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/literals.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/literals.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/parse.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/parse.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/pgen.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/pgen.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/tokenize.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pgen2/tokenize.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pygram.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pygram.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pytree.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/pytree.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/refactor.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/refactor.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/__main__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/__main__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/bom.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/bom.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/crlf.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/crlf.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/bad_order.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/bad_order.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_explicit.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_explicit.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_first.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_first.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_last.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_last.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_parrot.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_parrot.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_preorder.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_preorder.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/no_fixer_cls.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/no_fixer_cls.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/parrot_example.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/fixers/parrot_example.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/infinite_recursion.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/infinite_recursion.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/py2_test_grammar.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/py2_test_grammar.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/py3_test_grammar.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/data/py3_test_grammar.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/support.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/support.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_all_fixers.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_all_fixers.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_fixers.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_fixers.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_main.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_main.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_parser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_parser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_pytree.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_pytree.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_refactor.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_refactor.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/lib2to3/tests/test_util.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixer_util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixer_util.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/__init__.py
old mode 100644
new mode 100755
index 0b562501..7de304da
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/__init__.py
@@ -50,7 +50,7 @@
'lib2to3.fixes.fix_getcwdu',
# 'lib2to3.fixes.fix_imports', # called by libfuturize.fixes.fix_future_standard_library
# 'lib2to3.fixes.fix_imports2', # we don't handle this yet (dbm)
- # 'lib2to3.fixes.fix_input', # Called conditionally by libfuturize.fixes.fix_input
+ 'lib2to3.fixes.fix_input',
'lib2to3.fixes.fix_itertools',
'lib2to3.fixes.fix_itertools_imports',
'lib2to3.fixes.fix_filter',
@@ -86,7 +86,6 @@
'libfuturize.fixes.fix_future_builtins',
'libfuturize.fixes.fix_future_standard_library',
'libfuturize.fixes.fix_future_standard_library_urllib',
- 'libfuturize.fixes.fix_input',
'libfuturize.fixes.fix_metaclass',
'libpasteurize.fixes.fix_newstyle',
'libfuturize.fixes.fix_object',
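
The hunk above swaps the guarded libfuturize input fixer for the stock lib2to3 one, so the rewrite now applies unconditionally. Roughly, assuming a normal futurize stage-2 run:

# Before the fixer:
a = input()
# After lib2to3.fixes.fix_input:
a = eval(input())
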
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_UserDict.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_UserDict.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_absolute_import.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_absolute_import.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
old mode 100644
new mode 100755
index 37d7feec..1d419a1c
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
@@ -21,6 +21,6 @@ class FixAddFutureImportsExceptUnicodeLiterals(fixer_base.BaseFix):
def transform(self, node, results):
# Reverse order:
- future_import(u"absolute_import", node)
- future_import(u"division", node)
future_import(u"print_function", node)
+ future_import(u"division", node)
+ future_import(u"absolute_import", node)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_basestring.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_basestring.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_bytes.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_bytes.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_cmp.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_cmp.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_division.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_division.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_division_safe.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_division_safe.py
old mode 100644
new mode 100755
index 3d5909cc..7b0f3cbd
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_division_safe.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_division_safe.py
@@ -14,8 +14,10 @@
"""
import re
+import lib2to3.pytree as pytree
from lib2to3.fixer_util import Leaf, Node, Comma
from lib2to3 import fixer_base
+from lib2to3.fixer_util import syms, does_tree_import
from libfuturize.fixer_util import (token, future_import, touch_import_top,
wrap_in_fn_call)
@@ -31,8 +33,8 @@ def match_division(node):
const_re = re.compile('^[0-9]*[.][0-9]*$')
-def is_floaty(node):
- return _is_floaty(node.prev_sibling) or _is_floaty(node.next_sibling)
+def is_floaty(node, div_idx):
+ return _is_floaty(node.children[0:div_idx]) or _is_floaty(node.children[div_idx+1:])
def _is_floaty(expr):
@@ -48,6 +50,24 @@ def _is_floaty(expr):
return expr.children[0].value == u'float'
return False
+def find_division(node):
+ for i, child in enumerate(node.children):
+ if match_division(child):
+ return i
+ return False
+
+def clone_div_operands(node, div_idx):
+ children = []
+ for i, child in enumerate(node.children):
+ if i == div_idx:
+ children.append(Comma())
+ else:
+ children.append(child.clone())
+
+ # Strip any leading space for the first number:
+ children[0].prefix = u''
+
+ return children
class FixDivisionSafe(fixer_base.BaseFix):
# BM_compatible = True
@@ -72,28 +92,13 @@ def match(self, node):
matches, we can start discarding matches after the first.
"""
if node.type == self.syms.term:
- matched = False
- skip = False
- children = []
- for child in node.children:
- if skip:
- skip = False
- continue
- if match_division(child) and not is_floaty(child):
- matched = True
-
- # Strip any leading space for the first number:
- children[0].prefix = u''
-
- children = [wrap_in_fn_call("old_div",
- children + [Comma(), child.next_sibling.clone()],
- prefix=node.prefix)]
- skip = True
- else:
- children.append(child.clone())
- if matched:
- return Node(node.type, children, fixers_applied=node.fixers_applied)
-
+ div_idx = find_division(node)
+ if div_idx is not False:
+ # if expr1 or expr2 are obviously floats, we don't need to wrap in
+ # old_div, as the behavior of division between any number and a float
+ # should be the same in 2 or 3
+ if not is_floaty(node, div_idx):
+ return clone_div_operands(node, div_idx)
return False
def transform(self, node, results):
@@ -101,4 +106,4 @@ def transform(self, node, results):
return
future_import(u"division", node)
touch_import_top(u'past.utils', u'old_div', node)
- return results
+ return wrap_in_fn_call("old_div", results, prefix=node.prefix)
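
Net effect of the rewritten fixer, sketched under the assumption of a standard futurize stage-2 run: match() locates the '/' inside a term node via find_division(), clone_div_operands() swaps the operator for a comma, and transform() wraps the result in an old_div() call.

# Before:
ratio = x / y
# After FixDivisionSafe:
from past.utils import old_div
ratio = old_div(x, y)
# Divisions where either side is obviously a float (a float literal or a
# float() call), e.g. x / 2.0, are skipped: / already behaves the same
# on Python 2 and 3 there.
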
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_execfile.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_execfile.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_future_builtins.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_future_builtins.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_future_standard_library.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_future_standard_library.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_future_standard_library_urllib.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_future_standard_library_urllib.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_input.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_input.py
deleted file mode 100644
index 8a43882e..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_input.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""
-Fixer for input.
-
-Does a check for `from builtins import input` before running the lib2to3 fixer.
-The fixer will not run when the input is already present.
-
-
-this:
- a = input()
-becomes:
- from builtins import input
- a = eval(input())
-
-and this:
- from builtins import input
- a = input()
-becomes (no change):
- from builtins import input
- a = input()
-"""
-
-import lib2to3.fixes.fix_input
-from lib2to3.fixer_util import does_tree_import
-
-
-class FixInput(lib2to3.fixes.fix_input.FixInput):
- def transform(self, node, results):
-
- if does_tree_import('builtins', 'input', node):
- return
-
- return super(FixInput, self).transform(node, results)
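
Deleting this guard changes behavior for code that had already opted in to Python 3 semantics; per the docstring above, such code used to be left alone. An illustrative consequence:

from builtins import input
a = input()
# The guarded fixer skipped this statement; the stock lib2to3 fixer now
# rewrites it anyway, yielding:
a = eval(input())
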
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_metaclass.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_metaclass.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_next_call.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_next_call.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_object.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_object.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_oldstr_wrap.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_oldstr_wrap.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_order___future__imports.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_order___future__imports.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_print.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_print.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_print_with_import.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_print_with_import.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_raise.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_raise.py
old mode 100644
new mode 100755
index f7518416..3e8323de
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_raise.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_raise.py
@@ -4,39 +4,33 @@
raise -> raise
raise E -> raise E
-raise E, 5 -> raise E(5)
-raise E, 5, T -> raise E(5).with_traceback(T)
-raise E, None, T -> raise E.with_traceback(T)
+raise E, V -> raise E(V)
-raise (((E, E'), E''), E'''), 5 -> raise E(5)
-raise "foo", V, T -> warns about string exceptions
-
-raise E, (V1, V2) -> raise E(V1, V2)
-raise E, (V1, V2), T -> raise E(V1, V2).with_traceback(T)
+raise (((E, E'), E''), E'''), V -> raise E(V)
CAVEATS:
-1) "raise E, V, T" cannot be translated safely in general. If V
- is not a tuple or a (number, string, None) literal, then:
+1) "raise E, V" will be incorrectly translated if V is an exception
+ instance. The correct Python 3 idiom is
+
+ raise E from V
- raise E, V, T -> from future.utils import raise_
- raise_(E, V, T)
+ but since we can't detect instance-hood by syntax alone and since
+ any client code would have to be changed as well, we don't automate
+ this.
"""
-# Author: Collin Winter, Armin Ronacher, Mark Huang
+# Author: Collin Winter, Armin Ronacher
# Local imports
from lib2to3 import pytree, fixer_base
from lib2to3.pgen2 import token
-from lib2to3.fixer_util import Name, Call, is_tuple, Comma, Attr, ArgList
-
-from libfuturize.fixer_util import touch_import_top
-
+from lib2to3.fixer_util import Name, Call, is_tuple
class FixRaise(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
- raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
+ raise_stmt< 'raise' exc=any [',' val=any] >
"""
def transform(self, node, results):
@@ -61,47 +55,19 @@ def transform(self, node, results):
exc = exc.children[1].children[0].clone()
exc.prefix = u" "
- if "tb" in results:
- tb = results["tb"].clone()
- else:
- tb = None
-
- if "val" in results:
- val = results["val"].clone()
- if is_tuple(val):
- # Assume that exc is a subclass of Exception and call exc(*val).
- args = [c.clone() for c in val.children[1:-1]]
- exc = Call(exc, args)
- elif val.type in (token.NUMBER, token.STRING):
- # Handle numeric and string literals specially, e.g.
- # "raise Exception, 5" -> "raise Exception(5)".
- val.prefix = u""
- exc = Call(exc, [val])
- elif val.type == token.NAME and val.value == u"None":
- # Handle None specially, e.g.
- # "raise Exception, None" -> "raise Exception".
- pass
- else:
- # val is some other expression. If val evaluates to an instance
- # of exc, it should just be raised. If val evaluates to None,
- # a default instance of exc should be raised (as above). If val
- # evaluates to a tuple, exc(*val) should be called (as
- # above). Otherwise, exc(val) should be called. We can only
- # tell what to do at runtime, so defer to future.utils.raise_(),
- # which handles all of these cases.
- touch_import_top(u"future.utils", u"raise_", node)
- exc.prefix = u""
- args = [exc, Comma(), val]
- if tb is not None:
- args += [Comma(), tb]
- return Call(Name(u"raise_"), args)
-
- if tb is not None:
- tb.prefix = ""
- exc_list = Attr(exc, Name('with_traceback')) + [ArgList([tb])]
+ if "val" not in results:
+ # One-argument raise
+ new = pytree.Node(syms.raise_stmt, [Name(u"raise"), exc])
+ new.prefix = node.prefix
+ return new
+
+ val = results["val"].clone()
+ if is_tuple(val):
+ args = [c.clone() for c in val.children[1:-1]]
else:
- exc_list = [exc]
+ val.prefix = u""
+ args = [val]
return pytree.Node(syms.raise_stmt,
- [Name(u"raise")] + exc_list,
+ [Name(u"raise"), Call(exc, args)],
prefix=node.prefix)
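
Sample transformations under the replacement FixRaise (illustrative, not taken from the fixer's test suite):

# raise ValueError, "bad"   ->  raise ValueError("bad")
# raise KeyError, (k, v)    ->  raise KeyError(k, v)
# raise E                   ->  raise E           (unchanged)
# raise E, V, T             ->  no longer matched by the narrower PATTERN,
#                               so it passes through untouched instead of
#                               becoming raise_(E, V, T) as the old fixer did.
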
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_remove_old__future__imports.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_remove_old__future__imports.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_unicode_keep_u.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_unicode_keep_u.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_unicode_literals_import.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_unicode_literals_import.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_xrange_with_import.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/fixes/fix_xrange_with_import.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/main.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libfuturize/main.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/feature_base.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/feature_base.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_add_all__future__imports.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_add_all__future__imports.py
old mode 100644
new mode 100755
index a151f9f1..37897946
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_add_all__future__imports.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_add_all__future__imports.py
@@ -18,7 +18,7 @@ class FixAddAllFutureImports(fixer_base.BaseFix):
run_order = 1
def transform(self, node, results):
- future_import(u"absolute_import", node)
- future_import(u"division", node)
- future_import(u"print_function", node)
future_import(u"unicode_literals", node)
+ future_import(u"print_function", node)
+ future_import(u"division", node)
+ future_import(u"absolute_import", node)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_add_all_future_builtins.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_add_all_future_builtins.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_add_future_standard_library_import.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_add_future_standard_library_import.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_annotations.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_annotations.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_division.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_division.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_features.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_features.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_fullargspec.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_fullargspec.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_future_builtins.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_future_builtins.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_getcwd.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_getcwd.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_imports.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_imports.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_imports2.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_imports2.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_kwargs.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_kwargs.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_memoryview.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_memoryview.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_metaclass.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_metaclass.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_newstyle.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_newstyle.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_next.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_next.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_printfunction.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_printfunction.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_raise.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_raise.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_raise_.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_raise_.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_throw.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_throw.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_unpacking.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/fixes/fix_unpacking.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/main.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/libpasteurize/main.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/_ast_util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/_ast_util.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ast.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ast.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/cache.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/cache.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/cmd.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/cmd.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/codegen.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/codegen.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/compat.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/compat.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/exceptions.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/exceptions.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/autohandler.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/autohandler.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/babelplugin.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/babelplugin.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/beaker_cache.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/beaker_cache.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/extract.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/extract.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/linguaplugin.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/linguaplugin.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/preprocessors.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/preprocessors.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/pygmentplugin.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/pygmentplugin.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/turbogears.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/ext/turbogears.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/filters.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/filters.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/lexer.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/lexer.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/lookup.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/lookup.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/parsetree.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/parsetree.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/pygen.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/pygen.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/pyparser.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/pyparser.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/runtime.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/runtime.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/template.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/template.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/util.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/util.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/__init__.py
old mode 100644
new mode 100755
index d331ac36..da05ed32
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/__init__.py
@@ -1,34 +1,34 @@
-import functools
+# -*- coding: utf-8 -*-
+"""
+markupsafe
+~~~~~~~~~~
+
+Implements an escape function and a Markup string to replace HTML
+special characters with safe representations.
+
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
+"""
import re
import string
-import typing as t
-if t.TYPE_CHECKING:
- import typing_extensions as te
+from ._compat import int_types
+from ._compat import iteritems
+from ._compat import Mapping
+from ._compat import PY2
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import unichr
- class HasHTML(te.Protocol):
- def __html__(self) -> str:
- pass
+__version__ = "1.1.1"
-
-__version__ = "2.0.1"
+__all__ = ["Markup", "soft_unicode", "escape", "escape_silent"]
_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
+_entity_re = re.compile(r"&([^& ;]+);")
-def _simple_escaping_wrapper(name: str) -> t.Callable[..., "Markup"]:
- orig = getattr(str, name)
-
- @functools.wraps(orig)
- def wrapped(self: "Markup", *args: t.Any, **kwargs: t.Any) -> "Markup":
- args = _escape_argspec(list(args), enumerate(args), self.escape) # type: ignore
- _escape_argspec(kwargs, kwargs.items(), self.escape)
- return self.__class__(orig(self, *args, **kwargs))
-
- return wrapped
-
-
-class Markup(str):
+class Markup(text_type):
"""A string that is ready to be safely inserted into an HTML or XML
document, either because it was escaped or because it was marked
safe.
@@ -37,11 +37,11 @@ class Markup(str):
it to mark it safe without escaping. To escape the text, use the
:meth:`escape` class method instead.
- >>> Markup("Hello, World !")
+ >>> Markup('Hello, World !')
Markup('Hello, World !')
>>> Markup(42)
Markup('42')
- >>> Markup.escape("Hello, World !")
+ >>> Markup.escape('Hello, World !')
Markup('Hello <em>World</em>!')
This implements the ``__html__()`` interface that some frameworks
@@ -55,119 +55,132 @@ class Markup(str):
>>> Markup(Foo())
Markup('<a href="/foo">foo</a>')
- This is a subclass of :class:`str`. It has the same methods, but
- escapes their arguments and returns a ``Markup`` instance.
+ This is a subclass of the text type (``str`` in Python 3,
+ ``unicode`` in Python 2). It has the same methods as that type, but
+ all methods escape their arguments and return a ``Markup`` instance.
- >>> Markup("%s ") % ("foo & bar",)
+ >>> Markup('%s ') % 'foo & bar'
Markup('foo & bar ')
- >>> Markup("Hello ") + ""
+ >>> Markup('Hello ') + ''
Markup('Hello <foo>')
"""
__slots__ = ()
- def __new__(
- cls, base: t.Any = "", encoding: t.Optional[str] = None, errors: str = "strict"
- ) -> "Markup":
+ def __new__(cls, base=u"", encoding=None, errors="strict"):
if hasattr(base, "__html__"):
base = base.__html__()
-
if encoding is None:
- return super().__new__(cls, base)
+ return text_type.__new__(cls, base)
+ return text_type.__new__(cls, base, encoding, errors)
- return super().__new__(cls, base, encoding, errors)
-
- def __html__(self) -> "Markup":
+ def __html__(self):
return self
- def __add__(self, other: t.Union[str, "HasHTML"]) -> "Markup":
- if isinstance(other, str) or hasattr(other, "__html__"):
- return self.__class__(super().__add__(self.escape(other)))
-
+ def __add__(self, other):
+ if isinstance(other, string_types) or hasattr(other, "__html__"):
+ return self.__class__(super(Markup, self).__add__(self.escape(other)))
return NotImplemented
- def __radd__(self, other: t.Union[str, "HasHTML"]) -> "Markup":
- if isinstance(other, str) or hasattr(other, "__html__"):
+ def __radd__(self, other):
+ if hasattr(other, "__html__") or isinstance(other, string_types):
return self.escape(other).__add__(self)
-
return NotImplemented
- def __mul__(self, num: int) -> "Markup":
- if isinstance(num, int):
- return self.__class__(super().__mul__(num))
-
- return NotImplemented # type: ignore
+ def __mul__(self, num):
+ if isinstance(num, int_types):
+ return self.__class__(text_type.__mul__(self, num))
+ return NotImplemented
__rmul__ = __mul__
- def __mod__(self, arg: t.Any) -> "Markup":
+ def __mod__(self, arg):
if isinstance(arg, tuple):
arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
else:
arg = _MarkupEscapeHelper(arg, self.escape)
+ return self.__class__(text_type.__mod__(self, arg))
- return self.__class__(super().__mod__(arg))
-
- def __repr__(self) -> str:
- return f"{self.__class__.__name__}({super().__repr__()})"
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, text_type.__repr__(self))
- def join(self, seq: t.Iterable[t.Union[str, "HasHTML"]]) -> "Markup":
- return self.__class__(super().join(map(self.escape, seq)))
+ def join(self, seq):
+ return self.__class__(text_type.join(self, map(self.escape, seq)))
- join.__doc__ = str.join.__doc__
+ join.__doc__ = text_type.join.__doc__
- def split( # type: ignore
- self, sep: t.Optional[str] = None, maxsplit: int = -1
- ) -> t.List["Markup"]:
- return [self.__class__(v) for v in super().split(sep, maxsplit)]
+ def split(self, *args, **kwargs):
+ return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
- split.__doc__ = str.split.__doc__
+ split.__doc__ = text_type.split.__doc__
- def rsplit( # type: ignore
- self, sep: t.Optional[str] = None, maxsplit: int = -1
- ) -> t.List["Markup"]:
- return [self.__class__(v) for v in super().rsplit(sep, maxsplit)]
+ def rsplit(self, *args, **kwargs):
+ return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
- rsplit.__doc__ = str.rsplit.__doc__
+ rsplit.__doc__ = text_type.rsplit.__doc__
- def splitlines(self, keepends: bool = False) -> t.List["Markup"]: # type: ignore
- return [self.__class__(v) for v in super().splitlines(keepends)]
+ def splitlines(self, *args, **kwargs):
+ return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
- splitlines.__doc__ = str.splitlines.__doc__
+ splitlines.__doc__ = text_type.splitlines.__doc__
- def unescape(self) -> str:
+ def unescape(self):
"""Convert escaped markup back into a text string. This replaces
HTML entities with the characters they represent.
- >>> Markup("Main » About ").unescape()
+ >>> Markup('Main » About ').unescape()
'Main » About '
"""
- from html import unescape
-
- return unescape(str(self))
-
- def striptags(self) -> str:
+ from ._constants import HTML_ENTITIES
+
+ def handle_match(m):
+ name = m.group(1)
+ if name in HTML_ENTITIES:
+ return unichr(HTML_ENTITIES[name])
+ try:
+ if name[:2] in ("#x", "#X"):
+ return unichr(int(name[2:], 16))
+ elif name.startswith("#"):
+ return unichr(int(name[1:]))
+ except ValueError:
+ pass
+ # Don't modify unexpected input.
+ return m.group()
+
+ return _entity_re.sub(handle_match, text_type(self))
+
+ def striptags(self):
""":meth:`unescape` the markup, remove tags, and normalize
whitespace to single spaces.
- >>> Markup("Main »\tAbout ").striptags()
+ >>> Markup('Main »\tAbout ').striptags()
'Main » About'
"""
- stripped = " ".join(_striptags_re.sub("", self).split())
+ stripped = u" ".join(_striptags_re.sub("", self).split())
return Markup(stripped).unescape()
@classmethod
- def escape(cls, s: t.Any) -> "Markup":
+ def escape(cls, s):
"""Escape a string. Calls :func:`escape` and ensures that for
subclasses the correct type is returned.
"""
rv = escape(s)
-
if rv.__class__ is not cls:
return cls(rv)
-
return rv
+ def make_simple_escaping_wrapper(name): # noqa: B902
+ orig = getattr(text_type, name)
+
+ def func(self, *args, **kwargs):
+ args = _escape_argspec(list(args), enumerate(args), self.escape)
+ _escape_argspec(kwargs, iteritems(kwargs), self.escape)
+ return self.__class__(orig(self, *args, **kwargs))
+
+ func.__name__ = orig.__name__
+ func.__doc__ = orig.__doc__
+ return func
+
for method in (
"__getitem__",
"capitalize",
@@ -186,103 +199,129 @@ def escape(cls, s: t.Any) -> "Markup":
"swapcase",
"zfill",
):
- locals()[method] = _simple_escaping_wrapper(method)
-
- del method
+ locals()[method] = make_simple_escaping_wrapper(method)
- def partition(self, sep: str) -> t.Tuple["Markup", "Markup", "Markup"]:
- l, s, r = super().partition(self.escape(sep))
- cls = self.__class__
- return cls(l), cls(s), cls(r)
+ def partition(self, sep):
+ return tuple(map(self.__class__, text_type.partition(self, self.escape(sep))))
- def rpartition(self, sep: str) -> t.Tuple["Markup", "Markup", "Markup"]:
- l, s, r = super().rpartition(self.escape(sep))
- cls = self.__class__
- return cls(l), cls(s), cls(r)
+ def rpartition(self, sep):
+ return tuple(map(self.__class__, text_type.rpartition(self, self.escape(sep))))
- def format(self, *args: t.Any, **kwargs: t.Any) -> "Markup":
+ def format(self, *args, **kwargs):
formatter = EscapeFormatter(self.escape)
+ kwargs = _MagicFormatMapping(args, kwargs)
return self.__class__(formatter.vformat(self, args, kwargs))
- def __html_format__(self, format_spec: str) -> "Markup":
+ def __html_format__(self, format_spec):
if format_spec:
- raise ValueError("Unsupported format specification for Markup.")
-
+ raise ValueError("Unsupported format specification " "for Markup.")
return self
+ # not in python 3
+ if hasattr(text_type, "__getslice__"):
+ __getslice__ = make_simple_escaping_wrapper("__getslice__")
-class EscapeFormatter(string.Formatter):
- __slots__ = ("escape",)
-
- def __init__(self, escape: t.Callable[[t.Any], Markup]) -> None:
- self.escape = escape
- super().__init__()
-
- def format_field(self, value: t.Any, format_spec: str) -> str:
- if hasattr(value, "__html_format__"):
- rv = value.__html_format__(format_spec)
- elif hasattr(value, "__html__"):
- if format_spec:
- raise ValueError(
- f"Format specifier {format_spec} given, but {type(value)} does not"
- " define __html_format__. A class that defines __html__ must define"
- " __html_format__ to work with format specifiers."
- )
- rv = value.__html__()
- else:
- # We need to make sure the format spec is str here as
- # otherwise the wrong callback methods are invoked.
- rv = string.Formatter.format_field(self, value, str(format_spec))
- return str(self.escape(rv))
+ del method, make_simple_escaping_wrapper
-_ListOrDict = t.TypeVar("_ListOrDict", list, dict)
+class _MagicFormatMapping(Mapping):
+ """This class implements a dummy wrapper to fix a bug in the Python
+ standard library for string formatting.
+ See http://bugs.python.org/issue13598 for information about why
+ this is necessary.
+ """
-def _escape_argspec(
- obj: _ListOrDict, iterable: t.Iterable[t.Any], escape: t.Callable[[t.Any], Markup]
-) -> _ListOrDict:
+ def __init__(self, args, kwargs):
+ self._args = args
+ self._kwargs = kwargs
+ self._last_index = 0
+
+ def __getitem__(self, key):
+ if key == "":
+ idx = self._last_index
+ self._last_index += 1
+ try:
+ return self._args[idx]
+ except LookupError:
+ pass
+ key = str(idx)
+ return self._kwargs[key]
+
+ def __iter__(self):
+ return iter(self._kwargs)
+
+ def __len__(self):
+ return len(self._kwargs)
+
+
+if hasattr(text_type, "format"):
+
+ class EscapeFormatter(string.Formatter):
+ def __init__(self, escape):
+ self.escape = escape
+
+ def format_field(self, value, format_spec):
+ if hasattr(value, "__html_format__"):
+ rv = value.__html_format__(format_spec)
+ elif hasattr(value, "__html__"):
+ if format_spec:
+ raise ValueError(
+ "Format specifier {0} given, but {1} does not"
+ " define __html_format__. A class that defines"
+ " __html__ must define __html_format__ to work"
+ " with format specifiers.".format(format_spec, type(value))
+ )
+ rv = value.__html__()
+ else:
+ # We need to make sure the format spec is unicode here as
+ # otherwise the wrong callback methods are invoked. For
+ # instance a byte string there would invoke __str__ and
+ # not __unicode__.
+ rv = string.Formatter.format_field(self, value, text_type(format_spec))
+ return text_type(self.escape(rv))
+
+
+def _escape_argspec(obj, iterable, escape):
"""Helper for various string-wrapped functions."""
for key, value in iterable:
- if isinstance(value, str) or hasattr(value, "__html__"):
+ if hasattr(value, "__html__") or isinstance(value, string_types):
obj[key] = escape(value)
-
return obj
-class _MarkupEscapeHelper:
- """Helper for :meth:`Markup.__mod__`."""
+class _MarkupEscapeHelper(object):
+ """Helper for Markup.__mod__"""
- __slots__ = ("obj", "escape")
-
- def __init__(self, obj: t.Any, escape: t.Callable[[t.Any], Markup]) -> None:
+ def __init__(self, obj, escape):
self.obj = obj
self.escape = escape
- def __getitem__(self, item: t.Any) -> "_MarkupEscapeHelper":
+ def __getitem__(self, item):
return _MarkupEscapeHelper(self.obj[item], self.escape)
- def __str__(self) -> str:
- return str(self.escape(self.obj))
+ def __str__(self):
+ return text_type(self.escape(self.obj))
+
+ __unicode__ = __str__
- def __repr__(self) -> str:
+ def __repr__(self):
return str(self.escape(repr(self.obj)))
- def __int__(self) -> int:
+ def __int__(self):
return int(self.obj)
- def __float__(self) -> float:
+ def __float__(self):
return float(self.obj)
-# circular import
+# we have to import it down here as the speedups and native
+# modules import the markup type, which is defined above.
try:
- from ._speedups import escape as escape
- from ._speedups import escape_silent as escape_silent
- from ._speedups import soft_str as soft_str
- from ._speedups import soft_unicode
+ from ._speedups import escape, escape_silent, soft_unicode
except ImportError:
- from ._native import escape as escape
- from ._native import escape_silent as escape_silent # noqa: F401
- from ._native import soft_str as soft_str # noqa: F401
- from ._native import soft_unicode # noqa: F401
+ from ._native import escape, escape_silent, soft_unicode
+
+if not PY2:
+ soft_str = soft_unicode
+ __all__.append("soft_str")
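For orientation, the removed 2.x code and the restored 1.x code above implement the same escaping contract; a doctest-style sketch of that behavior (illustrative only, not part of the patch):

    >>> from markupsafe import Markup
    >>> # EscapeFormatter escapes arguments interpolated via str.format()
    >>> Markup("Hello {}!").format("<World>")
    Markup('Hello &lt;World&gt;!')
    >>> # _MarkupEscapeHelper gives %-formatting the same treatment
    >>> Markup("<em>%s</em>") % "<bad>"
    Markup('<em>&lt;bad&gt;</em>')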
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_compat.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_compat.py
new file mode 100755
index 00000000..bc05090f
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_compat.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+"""
+markupsafe._compat
+~~~~~~~~~~~~~~~~~~
+
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
+"""
+import sys
+
+PY2 = sys.version_info[0] == 2
+
+if not PY2:
+ text_type = str
+ string_types = (str,)
+ unichr = chr
+ int_types = (int,)
+
+ def iteritems(x):
+ return iter(x.items())
+
+ from collections.abc import Mapping
+
+else:
+ text_type = unicode
+ string_types = (str, unicode)
+ unichr = unichr
+ int_types = (int, long)
+
+ def iteritems(x):
+ return x.iteritems()
+
+ from collections import Mapping
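The shim above is what lets the rest of the vendored package stay version-agnostic; a minimal usage sketch, assuming the vendored module is importable:

    >>> from markupsafe._compat import text_type, string_types, iteritems
    >>> isinstance("x", string_types)   # (str,) on Py3, (str, unicode) on Py2
    True
    >>> text_type(42)                   # unicode on Py2, str on Py3
    '42'
    >>> list(iteritems({"a": 1}))
    [('a', 1)]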
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_constants.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_constants.py
new file mode 100755
index 00000000..7c57c2d2
--- /dev/null
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_constants.py
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+"""
+markupsafe._constants
+~~~~~~~~~~~~~~~~~~~~~
+
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
+"""
+
+HTML_ENTITIES = {
+ "AElig": 198,
+ "Aacute": 193,
+ "Acirc": 194,
+ "Agrave": 192,
+ "Alpha": 913,
+ "Aring": 197,
+ "Atilde": 195,
+ "Auml": 196,
+ "Beta": 914,
+ "Ccedil": 199,
+ "Chi": 935,
+ "Dagger": 8225,
+ "Delta": 916,
+ "ETH": 208,
+ "Eacute": 201,
+ "Ecirc": 202,
+ "Egrave": 200,
+ "Epsilon": 917,
+ "Eta": 919,
+ "Euml": 203,
+ "Gamma": 915,
+ "Iacute": 205,
+ "Icirc": 206,
+ "Igrave": 204,
+ "Iota": 921,
+ "Iuml": 207,
+ "Kappa": 922,
+ "Lambda": 923,
+ "Mu": 924,
+ "Ntilde": 209,
+ "Nu": 925,
+ "OElig": 338,
+ "Oacute": 211,
+ "Ocirc": 212,
+ "Ograve": 210,
+ "Omega": 937,
+ "Omicron": 927,
+ "Oslash": 216,
+ "Otilde": 213,
+ "Ouml": 214,
+ "Phi": 934,
+ "Pi": 928,
+ "Prime": 8243,
+ "Psi": 936,
+ "Rho": 929,
+ "Scaron": 352,
+ "Sigma": 931,
+ "THORN": 222,
+ "Tau": 932,
+ "Theta": 920,
+ "Uacute": 218,
+ "Ucirc": 219,
+ "Ugrave": 217,
+ "Upsilon": 933,
+ "Uuml": 220,
+ "Xi": 926,
+ "Yacute": 221,
+ "Yuml": 376,
+ "Zeta": 918,
+ "aacute": 225,
+ "acirc": 226,
+ "acute": 180,
+ "aelig": 230,
+ "agrave": 224,
+ "alefsym": 8501,
+ "alpha": 945,
+ "amp": 38,
+ "and": 8743,
+ "ang": 8736,
+ "apos": 39,
+ "aring": 229,
+ "asymp": 8776,
+ "atilde": 227,
+ "auml": 228,
+ "bdquo": 8222,
+ "beta": 946,
+ "brvbar": 166,
+ "bull": 8226,
+ "cap": 8745,
+ "ccedil": 231,
+ "cedil": 184,
+ "cent": 162,
+ "chi": 967,
+ "circ": 710,
+ "clubs": 9827,
+ "cong": 8773,
+ "copy": 169,
+ "crarr": 8629,
+ "cup": 8746,
+ "curren": 164,
+ "dArr": 8659,
+ "dagger": 8224,
+ "darr": 8595,
+ "deg": 176,
+ "delta": 948,
+ "diams": 9830,
+ "divide": 247,
+ "eacute": 233,
+ "ecirc": 234,
+ "egrave": 232,
+ "empty": 8709,
+ "emsp": 8195,
+ "ensp": 8194,
+ "epsilon": 949,
+ "equiv": 8801,
+ "eta": 951,
+ "eth": 240,
+ "euml": 235,
+ "euro": 8364,
+ "exist": 8707,
+ "fnof": 402,
+ "forall": 8704,
+ "frac12": 189,
+ "frac14": 188,
+ "frac34": 190,
+ "frasl": 8260,
+ "gamma": 947,
+ "ge": 8805,
+ "gt": 62,
+ "hArr": 8660,
+ "harr": 8596,
+ "hearts": 9829,
+ "hellip": 8230,
+ "iacute": 237,
+ "icirc": 238,
+ "iexcl": 161,
+ "igrave": 236,
+ "image": 8465,
+ "infin": 8734,
+ "int": 8747,
+ "iota": 953,
+ "iquest": 191,
+ "isin": 8712,
+ "iuml": 239,
+ "kappa": 954,
+ "lArr": 8656,
+ "lambda": 955,
+ "lang": 9001,
+ "laquo": 171,
+ "larr": 8592,
+ "lceil": 8968,
+ "ldquo": 8220,
+ "le": 8804,
+ "lfloor": 8970,
+ "lowast": 8727,
+ "loz": 9674,
+ "lrm": 8206,
+ "lsaquo": 8249,
+ "lsquo": 8216,
+ "lt": 60,
+ "macr": 175,
+ "mdash": 8212,
+ "micro": 181,
+ "middot": 183,
+ "minus": 8722,
+ "mu": 956,
+ "nabla": 8711,
+ "nbsp": 160,
+ "ndash": 8211,
+ "ne": 8800,
+ "ni": 8715,
+ "not": 172,
+ "notin": 8713,
+ "nsub": 8836,
+ "ntilde": 241,
+ "nu": 957,
+ "oacute": 243,
+ "ocirc": 244,
+ "oelig": 339,
+ "ograve": 242,
+ "oline": 8254,
+ "omega": 969,
+ "omicron": 959,
+ "oplus": 8853,
+ "or": 8744,
+ "ordf": 170,
+ "ordm": 186,
+ "oslash": 248,
+ "otilde": 245,
+ "otimes": 8855,
+ "ouml": 246,
+ "para": 182,
+ "part": 8706,
+ "permil": 8240,
+ "perp": 8869,
+ "phi": 966,
+ "pi": 960,
+ "piv": 982,
+ "plusmn": 177,
+ "pound": 163,
+ "prime": 8242,
+ "prod": 8719,
+ "prop": 8733,
+ "psi": 968,
+ "quot": 34,
+ "rArr": 8658,
+ "radic": 8730,
+ "rang": 9002,
+ "raquo": 187,
+ "rarr": 8594,
+ "rceil": 8969,
+ "rdquo": 8221,
+ "real": 8476,
+ "reg": 174,
+ "rfloor": 8971,
+ "rho": 961,
+ "rlm": 8207,
+ "rsaquo": 8250,
+ "rsquo": 8217,
+ "sbquo": 8218,
+ "scaron": 353,
+ "sdot": 8901,
+ "sect": 167,
+ "shy": 173,
+ "sigma": 963,
+ "sigmaf": 962,
+ "sim": 8764,
+ "spades": 9824,
+ "sub": 8834,
+ "sube": 8838,
+ "sum": 8721,
+ "sup": 8835,
+ "sup1": 185,
+ "sup2": 178,
+ "sup3": 179,
+ "supe": 8839,
+ "szlig": 223,
+ "tau": 964,
+ "there4": 8756,
+ "theta": 952,
+ "thetasym": 977,
+ "thinsp": 8201,
+ "thorn": 254,
+ "tilde": 732,
+ "times": 215,
+ "trade": 8482,
+ "uArr": 8657,
+ "uacute": 250,
+ "uarr": 8593,
+ "ucirc": 251,
+ "ugrave": 249,
+ "uml": 168,
+ "upsih": 978,
+ "upsilon": 965,
+ "uuml": 252,
+ "weierp": 8472,
+ "xi": 958,
+ "yacute": 253,
+ "yen": 165,
+ "yuml": 255,
+ "zeta": 950,
+ "zwj": 8205,
+ "zwnj": 8204,
+}
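This table maps HTML entity names to Unicode code points; in the 1.x code base it backs ``Markup.unescape``. A small lookup sketch (illustrative only):

    >>> from markupsafe._constants import HTML_ENTITIES
    >>> from markupsafe._compat import unichr
    >>> unichr(HTML_ENTITIES["amp"])
    '&'
    >>> unichr(HTML_ENTITIES["copy"])
    '©'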
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_native.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_native.py
old mode 100644
new mode 100755
index 6f7eb7a8..cd08752c
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_native.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_native.py
@@ -1,9 +1,18 @@
-import typing as t
+# -*- coding: utf-8 -*-
+"""
+markupsafe._native
+~~~~~~~~~~~~~~~~~~
+Native Python implementation used when the C module is not compiled.
+
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
+"""
from . import Markup
+from ._compat import text_type
-def escape(s: t.Any) -> Markup:
+def escape(s):
"""Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
the string with HTML-safe sequences. Use this if you need to display
text that might contain such characters in HTML.
@@ -16,9 +25,8 @@ def escape(s: t.Any) -> Markup:
"""
if hasattr(s, "__html__"):
return Markup(s.__html__())
-
return Markup(
- str(s)
+ text_type(s)
.replace("&", "&")
.replace(">", ">")
.replace("<", "<")
@@ -27,7 +35,7 @@ def escape(s: t.Any) -> Markup:
)
-def escape_silent(s: t.Optional[t.Any]) -> Markup:
+def escape_silent(s):
"""Like :func:`escape` but treats ``None`` as the empty string.
Useful with optional values, as otherwise you get the string
``'None'`` when the value is ``None``.
@@ -39,37 +47,23 @@ def escape_silent(s: t.Optional[t.Any]) -> Markup:
"""
if s is None:
return Markup()
-
return escape(s)
-def soft_str(s: t.Any) -> str:
+def soft_unicode(s):
"""Convert an object to a string if it isn't already. This preserves
a :class:`Markup` string rather than converting it back to a basic
string, so it will still be marked as safe and won't be escaped
again.
-    >>> value = escape("<User 1>")
+    >>> value = escape('<User 1>')
    >>> value
    Markup('&lt;User 1&gt;')
    >>> escape(str(value))
    Markup('&amp;lt;User 1&amp;gt;')
-    >>> escape(soft_str(value))
+    >>> escape(soft_unicode(value))
    Markup('&lt;User 1&gt;')
"""
- if not isinstance(s, str):
- return str(s)
-
+ if not isinstance(s, text_type):
+ s = text_type(s)
return s
-
-
-def soft_unicode(s: t.Any) -> str:
- import warnings
-
- warnings.warn(
- "'soft_unicode' has been renamed to 'soft_str'. The old name"
- " will be removed in MarkupSafe 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
- return soft_str(s)
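Taken together, the three restored functions differ only in how they treat ``None`` and already-safe strings; a doctest-style summary (illustrative, not part of the patch):

    >>> from markupsafe._native import escape, escape_silent, soft_unicode
    >>> escape("<b>")
    Markup('&lt;b&gt;')
    >>> escape(None)
    Markup('None')
    >>> escape_silent(None)
    Markup('')
    >>> escape(soft_unicode(escape("<b>")))   # safe strings are not re-escaped
    Markup('&lt;b&gt;')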
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_speedups.c b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_speedups.c
index 44967b1f..12d2c4a7 100644
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_speedups.c
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_speedups.c
@@ -1,5 +1,23 @@
+/**
+ * markupsafe._speedups
+ * ~~~~~~~~~~~~~~~~~~~~
+ *
+ * C implementation of escaping for better performance. Used instead of
+ * the native Python implementation when compiled.
+ *
+ * :copyright: 2010 Pallets
+ * :license: BSD-3-Clause
+ */
#include <Python.h>
+#if PY_MAJOR_VERSION < 3
+#define ESCAPED_CHARS_TABLE_SIZE 63
+#define UNICHR(x) (PyUnicode_AS_UNICODE((PyUnicodeObject*)PyUnicode_DecodeASCII(x, strlen(x), NULL)));
+
+static Py_ssize_t escaped_chars_delta_len[ESCAPED_CHARS_TABLE_SIZE];
+static Py_UNICODE *escaped_chars_repl[ESCAPED_CHARS_TABLE_SIZE];
+#endif
+
static PyObject* markup;
static int
@@ -7,6 +25,21 @@ init_constants(void)
{
PyObject *module;
+#if PY_MAJOR_VERSION < 3
+ /* mapping of characters to replace */
+ escaped_chars_repl['"'] = UNICHR(""");
+ escaped_chars_repl['\''] = UNICHR("'");
+ escaped_chars_repl['&'] = UNICHR("&");
+ escaped_chars_repl['<'] = UNICHR("<");
+ escaped_chars_repl['>'] = UNICHR(">");
+
+ /* lengths of those characters when replaced - 1 */
+ memset(escaped_chars_delta_len, 0, sizeof (escaped_chars_delta_len));
+ escaped_chars_delta_len['"'] = escaped_chars_delta_len['\''] = \
+ escaped_chars_delta_len['&'] = 4;
+ escaped_chars_delta_len['<'] = escaped_chars_delta_len['>'] = 3;
+#endif
+
/* import markup type so that we can mark the return value */
module = PyImport_ImportModule("markupsafe");
if (!module)
@@ -17,74 +50,137 @@ init_constants(void)
return 1;
}
+#if PY_MAJOR_VERSION < 3
+static PyObject*
+escape_unicode(PyUnicodeObject *in)
+{
+ PyUnicodeObject *out;
+ Py_UNICODE *inp = PyUnicode_AS_UNICODE(in);
+ const Py_UNICODE *inp_end = PyUnicode_AS_UNICODE(in) + PyUnicode_GET_SIZE(in);
+ Py_UNICODE *next_escp;
+ Py_UNICODE *outp;
+ Py_ssize_t delta=0, erepl=0, delta_len=0;
+
+ /* First we need to figure out how long the escaped string will be */
+ while (*(inp) || inp < inp_end) {
+ if (*inp < ESCAPED_CHARS_TABLE_SIZE) {
+ delta += escaped_chars_delta_len[*inp];
+ erepl += !!escaped_chars_delta_len[*inp];
+ }
+ ++inp;
+ }
+
+ /* Do we need to escape anything at all? */
+ if (!erepl) {
+ Py_INCREF(in);
+ return (PyObject*)in;
+ }
+
+ out = (PyUnicodeObject*)PyUnicode_FromUnicode(NULL, PyUnicode_GET_SIZE(in) + delta);
+ if (!out)
+ return NULL;
+
+ outp = PyUnicode_AS_UNICODE(out);
+ inp = PyUnicode_AS_UNICODE(in);
+ while (erepl-- > 0) {
+ /* look for the next substitution */
+ next_escp = inp;
+ while (next_escp < inp_end) {
+ if (*next_escp < ESCAPED_CHARS_TABLE_SIZE &&
+ (delta_len = escaped_chars_delta_len[*next_escp])) {
+ ++delta_len;
+ break;
+ }
+ ++next_escp;
+ }
+
+ if (next_escp > inp) {
+ /* copy unescaped chars between inp and next_escp */
+ Py_UNICODE_COPY(outp, inp, next_escp-inp);
+ outp += next_escp - inp;
+ }
+
+ /* escape 'next_escp' */
+ Py_UNICODE_COPY(outp, escaped_chars_repl[*next_escp], delta_len);
+ outp += delta_len;
+
+ inp = next_escp + 1;
+ }
+ if (inp < inp_end)
+ Py_UNICODE_COPY(outp, inp, PyUnicode_GET_SIZE(in) - (inp - PyUnicode_AS_UNICODE(in)));
+
+ return (PyObject*)out;
+}
+#else /* PY_MAJOR_VERSION < 3 */
+
#define GET_DELTA(inp, inp_end, delta) \
- while (inp < inp_end) { \
- switch (*inp++) { \
- case '"': \
- case '\'': \
- case '&': \
- delta += 4; \
- break; \
- case '<': \
- case '>': \
- delta += 3; \
- break; \
- } \
+ while (inp < inp_end) { \
+ switch (*inp++) { \
+ case '"': \
+ case '\'': \
+ case '&': \
+ delta += 4; \
+ break; \
+ case '<': \
+ case '>': \
+ delta += 3; \
+ break; \
+ } \
}
#define DO_ESCAPE(inp, inp_end, outp) \
- { \
- Py_ssize_t ncopy = 0; \
- while (inp < inp_end) { \
- switch (*inp) { \
- case '"': \
+ { \
+ Py_ssize_t ncopy = 0; \
+ while (inp < inp_end) { \
+ switch (*inp) { \
+ case '"': \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
outp += ncopy; ncopy = 0; \
- *outp++ = '&'; \
- *outp++ = '#'; \
- *outp++ = '3'; \
- *outp++ = '4'; \
- *outp++ = ';'; \
- break; \
- case '\'': \
+ *outp++ = '&'; \
+ *outp++ = '#'; \
+ *outp++ = '3'; \
+ *outp++ = '4'; \
+ *outp++ = ';'; \
+ break; \
+ case '\'': \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
outp += ncopy; ncopy = 0; \
- *outp++ = '&'; \
- *outp++ = '#'; \
- *outp++ = '3'; \
- *outp++ = '9'; \
- *outp++ = ';'; \
- break; \
- case '&': \
+ *outp++ = '&'; \
+ *outp++ = '#'; \
+ *outp++ = '3'; \
+ *outp++ = '9'; \
+ *outp++ = ';'; \
+ break; \
+ case '&': \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
outp += ncopy; ncopy = 0; \
- *outp++ = '&'; \
- *outp++ = 'a'; \
- *outp++ = 'm'; \
- *outp++ = 'p'; \
- *outp++ = ';'; \
- break; \
- case '<': \
+ *outp++ = '&'; \
+ *outp++ = 'a'; \
+ *outp++ = 'm'; \
+ *outp++ = 'p'; \
+ *outp++ = ';'; \
+ break; \
+ case '<': \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
outp += ncopy; ncopy = 0; \
- *outp++ = '&'; \
- *outp++ = 'l'; \
- *outp++ = 't'; \
- *outp++ = ';'; \
- break; \
- case '>': \
+ *outp++ = '&'; \
+ *outp++ = 'l'; \
+ *outp++ = 't'; \
+ *outp++ = ';'; \
+ break; \
+ case '>': \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
outp += ncopy; ncopy = 0; \
- *outp++ = '&'; \
- *outp++ = 'g'; \
- *outp++ = 't'; \
- *outp++ = ';'; \
- break; \
- default: \
+ *outp++ = '&'; \
+ *outp++ = 'g'; \
+ *outp++ = 't'; \
+ *outp++ = ';'; \
+ break; \
+ default: \
ncopy++; \
- } \
- inp++; \
- } \
+ } \
+ inp++; \
+ } \
memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
}
@@ -182,6 +278,7 @@ escape_unicode(PyUnicodeObject *in)
assert(0); /* shouldn't happen */
return NULL;
}
+#endif /* PY_MAJOR_VERSION < 3 */
static PyObject*
escape(PyObject *self, PyObject *text)
@@ -190,7 +287,11 @@ escape(PyObject *self, PyObject *text)
PyObject *s = NULL, *rv = NULL, *html;
if (id_html == NULL) {
+#if PY_MAJOR_VERSION < 3
+ id_html = PyString_InternFromString("__html__");
+#else
id_html = PyUnicode_InternFromString("__html__");
+#endif
if (id_html == NULL) {
return NULL;
}
@@ -198,8 +299,11 @@ escape(PyObject *self, PyObject *text)
/* we don't have to escape integers, bools or floats */
if (PyLong_CheckExact(text) ||
- PyFloat_CheckExact(text) || PyBool_Check(text) ||
- text == Py_None)
+#if PY_MAJOR_VERSION < 3
+ PyInt_CheckExact(text) ||
+#endif
+ PyFloat_CheckExact(text) || PyBool_Check(text) ||
+ text == Py_None)
return PyObject_CallFunctionObjArgs(markup, text, NULL);
/* if the object has an __html__ method that performs the escaping */
@@ -219,7 +323,11 @@ escape(PyObject *self, PyObject *text)
/* otherwise make the object unicode if it isn't, then escape */
PyErr_Clear();
if (!PyUnicode_Check(text)) {
+#if PY_MAJOR_VERSION < 3
+ PyObject *unicode = PyObject_Unicode(text);
+#else
PyObject *unicode = PyObject_Str(text);
+#endif
if (!unicode)
return NULL;
s = escape_unicode((PyUnicodeObject*)unicode);
@@ -245,80 +353,54 @@ escape_silent(PyObject *self, PyObject *text)
static PyObject*
-soft_str(PyObject *self, PyObject *s)
+soft_unicode(PyObject *self, PyObject *s)
{
if (!PyUnicode_Check(s))
+#if PY_MAJOR_VERSION < 3
+ return PyObject_Unicode(s);
+#else
return PyObject_Str(s);
+#endif
Py_INCREF(s);
return s;
}
-static PyObject*
-soft_unicode(PyObject *self, PyObject *s)
+static PyMethodDef module_methods[] = {
+ {"escape", (PyCFunction)escape, METH_O,
+ "escape(s) -> markup\n\n"
+ "Convert the characters &, <, >, ', and \" in string s to HTML-safe\n"
+ "sequences. Use this if you need to display text that might contain\n"
+ "such characters in HTML. Marks return value as markup string."},
+ {"escape_silent", (PyCFunction)escape_silent, METH_O,
+ "escape_silent(s) -> markup\n\n"
+ "Like escape but converts None to an empty string."},
+ {"soft_unicode", (PyCFunction)soft_unicode, METH_O,
+ "soft_unicode(object) -> string\n\n"
+ "Make a string unicode if it isn't already. That way a markup\n"
+ "string is not converted back to unicode."},
+ {NULL, NULL, 0, NULL} /* Sentinel */
+};
+
+
+#if PY_MAJOR_VERSION < 3
+
+#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
+#define PyMODINIT_FUNC void
+#endif
+PyMODINIT_FUNC
+init_speedups(void)
{
- PyErr_WarnEx(
- PyExc_DeprecationWarning,
- "'soft_unicode' has been renamed to 'soft_str'. The old name"
- " will be removed in MarkupSafe 2.1.",
- 2
- );
- return soft_str(self, s);
-}
+ if (!init_constants())
+ return;
+ Py_InitModule3("markupsafe._speedups", module_methods, "");
+}
-static PyMethodDef module_methods[] = {
- {
- "escape",
- (PyCFunction)escape,
- METH_O,
- "Replace the characters ``&``, ``<``, ``>``, ``'``, and ``\"`` in"
- " the string with HTML-safe sequences. Use this if you need to display"
- " text that might contain such characters in HTML.\n\n"
- "If the object has an ``__html__`` method, it is called and the"
- " return value is assumed to already be safe for HTML.\n\n"
- ":param s: An object to be converted to a string and escaped.\n"
- ":return: A :class:`Markup` string with the escaped text.\n"
- },
- {
- "escape_silent",
- (PyCFunction)escape_silent,
- METH_O,
- "Like :func:`escape` but treats ``None`` as the empty string."
- " Useful with optional values, as otherwise you get the string"
- " ``'None'`` when the value is ``None``.\n\n"
- ">>> escape(None)\n"
- "Markup('None')\n"
- ">>> escape_silent(None)\n"
- "Markup('')\n"
- },
- {
- "soft_str",
- (PyCFunction)soft_str,
- METH_O,
- "Convert an object to a string if it isn't already. This preserves"
- " a :class:`Markup` string rather than converting it back to a basic"
- " string, so it will still be marked as safe and won't be escaped"
- " again.\n\n"
- ">>> value = escape(\"\")\n"
- ">>> value\n"
- "Markup('<User 1>')\n"
- ">>> escape(str(value))\n"
- "Markup('<User 1>')\n"
- ">>> escape(soft_str(value))\n"
- "Markup('<User 1>')\n"
- },
- {
- "soft_unicode",
- (PyCFunction)soft_unicode,
- METH_O,
- ""
- },
- {NULL, NULL, 0, NULL} /* Sentinel */
-};
+#else /* Python 3.x module initialization */
static struct PyModuleDef module_definition = {
- PyModuleDef_HEAD_INIT,
+ PyModuleDef_HEAD_INIT,
"markupsafe._speedups",
NULL,
-1,
@@ -337,3 +419,5 @@ PyInit__speedups(void)
return PyModule_Create(&module_definition);
}
+
+#endif
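The Python 2 branch added above escapes in two passes: the first pass sizes the output buffer (``delta``), the second copies runs of unescaped characters and splices in replacements. A rough Python rendering of that strategy, for readers who prefer not to trace the C (illustrative only):

    REPL = {'"': "&#34;", "'": "&#39;", "&": "&amp;", "<": "&lt;", ">": "&gt;"}

    def escape_two_pass(s):
        # pass 1: how much longer will the escaped string be?
        extra = sum(len(REPL[c]) - 1 for c in s if c in REPL)
        if not extra:
            return s  # nothing to escape; the C code INCREFs and returns the input
        # pass 2: the C code writes into a buffer pre-sized to len(s) + extra;
        # here a join over per-character replacements stands in for that
        return "".join(REPL.get(c, c) for c in s)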
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_speedups.pyi b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_speedups.pyi
deleted file mode 100644
index f673240f..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/_speedups.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-from typing import Any
-from typing import Optional
-
-from . import Markup
-
-def escape(s: Any) -> Markup: ...
-def escape_silent(s: Optional[Any]) -> Markup: ...
-def soft_str(s: Any) -> str: ...
-def soft_unicode(s: Any) -> str: ...
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/py.typed b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/markupsafe/py.typed
deleted file mode 100644
index e69de29b..00000000
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/modinput_wrapper/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/modinput_wrapper/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/modinput_wrapper/base_modinput.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/modinput_wrapper/base_modinput.py
index e2c8897f..2079e0ad 100755
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/modinput_wrapper/base_modinput.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/modinput_wrapper/base_modinput.py
@@ -7,7 +7,7 @@
import json
import tempfile
-from splunklib import modularinput as smi
+from solnlib.packages.splunklib import modularinput as smi
from solnlib.log import Logs
from solnlib.modular_input import checkpointer
from solnlib import utils as sutils
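The one-line change above pins the modular-input SDK to the copy vendored inside solnlib. A fallback variant (hypothetical; not what this patch does) would tolerate either layout:

    try:
        from solnlib.packages.splunklib import modularinput as smi
    except ImportError:
        from splunklib import modularinput as smi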
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/INSTALLER b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/LICENSE.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/LICENSE.txt
deleted file mode 100644
index e7646684..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2010 David Schoonover
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/METADATA b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/METADATA
deleted file mode 100644
index cbe81bd3..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/METADATA
+++ /dev/null
@@ -1,26 +0,0 @@
-Metadata-Version: 2.1
-Name: munch
-Version: 2.3.2
-Summary: A dot-accessible dictionary (a la JavaScript objects).
-Home-page: http://github.com/Infinidat/munch
-Author: Rotem Yaari
-Author-email: vmalloc@gmail.com
-License: MIT
-Keywords: munch,dict,mapping,container,collection
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Topic :: Software Development
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Utilities
-Classifier: License :: OSI Approved :: MIT License
-License-File: LICENSE.txt
-Requires-Dist: six
-
-UNKNOWN
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/RECORD b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/RECORD
deleted file mode 100644
index 18a88520..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/RECORD
+++ /dev/null
@@ -1,9 +0,0 @@
-munch-2.3.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-munch-2.3.2.dist-info/LICENSE.txt,sha256=V8qVySBZyDgGJRkkYpeb0ymUquP835Av9useRn7rBGk,1079
-munch-2.3.2.dist-info/METADATA,sha256=8f76jlBWV-he4FEQir_-TvfmKxfPNqm5bwIvbE6gims,865
-munch-2.3.2.dist-info/RECORD,,
-munch-2.3.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-munch-2.3.2.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110
-munch-2.3.2.dist-info/top_level.txt,sha256=PRHN8MYaV54issXsc-3Sde-NdKBLL7BXsafd7Haw8IE,6
-munch/__init__.py,sha256=UQvBwwtPbqAzNzIADInzc1fWpX38nFztUDboLQigu0o,16556
-munch/python3_compat.py,sha256=fguSsh5lZOxfWI6r4hvima0N8dYn167fXqpRkqu5OEw,71
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/REQUESTED b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/REQUESTED
deleted file mode 100644
index e69de29b..00000000
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/WHEEL b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/WHEEL
deleted file mode 100644
index 0b18a281..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.37.1)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/top_level.txt b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/top_level.txt
deleted file mode 100644
index 3d7d7ecd..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch-2.3.2.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-munch
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch/python3_compat.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/munch/python3_compat.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/__init__.py
old mode 100644
new mode 100755
index 14713039..3b5d9db1
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/__init__.py
@@ -61,7 +61,7 @@
$ python3
- >>> from past.translation import autotranslate
+ >>> from past import autotranslate
    >>> autotranslate('mypy2module')
>>> import mypy2module
@@ -74,16 +74,18 @@
Credits
-------
-:Author: Ed Schofield, Jordan M. Adler, et al
+:Author: Ed Schofield
:Sponsor: Python Charmers Pty Ltd, Australia: http://pythoncharmers.com
Licensing
---------
-Copyright 2013-2019 Python Charmers Pty Ltd, Australia.
+Copyright 2013-2018 Python Charmers Pty Ltd, Australia.
The software is distributed under an MIT licence. See LICENSE.txt.
"""
+
+from past.translation import install_hooks as autotranslate
from future import __version__, __copyright__, __license__
__title__ = 'past'
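With the alias line added above, ``autotranslate`` is simply ``past.translation.install_hooks`` re-exported from the package root, so the docstring's workflow becomes:

    $ python3
    >>> from past import autotranslate
    >>> autotranslate('mypy2module')
    >>> import mypy2module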
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/builtins/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/builtins/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/builtins/misc.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/builtins/misc.py
old mode 100644
new mode 100755
index ba50aa9e..06fbb92d
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/builtins/misc.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/builtins/misc.py
@@ -1,17 +1,13 @@
from __future__ import unicode_literals
-
+import sys
import inspect
+from collections import Mapping
-from future.utils import PY2, PY3, exec_
+from future.utils import PY3, exec_
-if PY2:
- from collections import Mapping
-else:
- from collections.abc import Mapping
if PY3:
import builtins
- from collections.abc import Mapping
def apply(f, *args, **kw):
return f(*args, **kw)
@@ -48,7 +44,6 @@ def oct(number):
xrange = range
else:
import __builtin__
- from collections import Mapping
apply = __builtin__.apply
chr = __builtin__.chr
cmp = __builtin__.cmp
@@ -81,7 +76,7 @@ def execfile(filename, myglobals=None, mylocals=None):
raise TypeError('globals must be a mapping')
if not isinstance(mylocals, Mapping):
raise TypeError('locals must be a mapping')
- with open(filename, "rb") as fin:
+ with open(filename, "rbU") as fin:
source = fin.read()
code = compile(source, filename, "exec")
exec_(code, myglobals, mylocals)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/builtins/noniterators.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/builtins/noniterators.py
old mode 100644
new mode 100755
index 183ffffd..5826b97c
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/builtins/noniterators.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/builtins/noniterators.py
@@ -72,7 +72,7 @@ def oldmap(func, *iterables):
>>> oldmap(None, range(4))
[0, 1, 2, 3]
- More test cases are in test_past.test_builtins.
+ More test cases are in past.tests.test_builtins.
"""
zipped = itertools.zip_longest(*iterables)
l = list(zipped)
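``oldmap`` reproduces Python 2 ``map`` semantics on Python 3: a ``None`` function acts like ``zip`` with ``None`` padding, and a real function is applied star-wise to the zipped tuples. A sketch of the expected behavior (the first example is the doctest above; the second assumes Py2 padding semantics):

    >>> from past.builtins import map as oldmap
    >>> oldmap(None, range(4))
    [0, 1, 2, 3]
    >>> oldmap(lambda x, y: (x, y), [1, 2], [3])
    [(1, 3), (2, None)]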
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/jaraco/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/tests/__init__.py
old mode 100644
new mode 100755
similarity index 100%
rename from Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/jaraco/__init__.py
rename to Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/tests/__init__.py
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/translation/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/translation/__init__.py
old mode 100644
new mode 100755
index 7c678866..c7ae2b7a
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/translation/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/translation/__init__.py
@@ -16,7 +16,7 @@
Once your Py2 package is installed in the usual module search path, the import
hook is invoked as follows:
- >>> from past.translation import autotranslate
+ >>> from past import autotranslate
>>> autotranslate('mypackagename')
Or:
@@ -219,9 +219,22 @@ def detect_python2(source, pathname):
if source != str(tree)[:-1]: # remove added newline
# The above fixers made changes, so we conclude it's Python 2 code
logger.debug('Detected Python 2 code: {0}'.format(pathname))
+ with open('/tmp/original_code.py', 'w') as f:
+ f.write('### Original code (detected as py2): %s\n%s' %
+ (pathname, source))
+ with open('/tmp/py2_detection_code.py', 'w') as f:
+ f.write('### Code after running py3 detection (from %s)\n%s' %
+ (pathname, str(tree)[:-1]))
return True
else:
logger.debug('Detected Python 3 code: {0}'.format(pathname))
+ with open('/tmp/original_code.py', 'w') as f:
+ f.write('### Original code (detected as py3): %s\n%s' %
+ (pathname, source))
+ try:
+ os.remove('/tmp/futurize_code.py')
+ except OSError:
+ pass
return False
@@ -382,6 +395,9 @@ def load_module(self, fullname):
if detect_python2(source, self.pathname):
source = self.transform(source)
+ with open('/tmp/futurized_code.py', 'w') as f:
+ f.write('### Futurized code (from %s)\n%s' %
+ (self.pathname, source))
code = compile(source, self.pathname, 'exec')
@@ -416,7 +432,7 @@ def install_hooks(include_paths=(), exclude_paths=()):
_hook.include(include_paths)
_hook.exclude(exclude_paths)
# _hook.debug = debug
- enable = sys.version_info[0] >= 3 # enabled for all 3.x+
+ enable = sys.version_info[0] >= 3 # enabled for all 3.x
if enable and _hook not in sys.meta_path:
sys.meta_path.insert(0, _hook) # insert at beginning. This could be made a parameter
@@ -479,7 +495,3 @@ def __enter__(self):
def __exit__(self, *args):
if self.hooks_were_installed:
install_hooks()
-
-
-# alias
-autotranslate = install_hooks
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/types/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/types/__init__.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/types/basestring.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/types/basestring.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/types/olddict.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/types/olddict.py
old mode 100644
new mode 100755
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/types/oldstr.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/types/oldstr.py
old mode 100644
new mode 100755
index a477d884..7768d328
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/types/oldstr.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/types/oldstr.py
@@ -2,14 +2,11 @@
Pure-Python implementation of a Python 2-like str object for Python 3.
"""
+from collections import Iterable
from numbers import Integral
from past.utils import PY2, with_metaclass
-if PY2:
- from collections import Iterable
-else:
- from collections.abc import Iterable
_builtin_bytes = bytes
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/utils/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/utils/__init__.py
old mode 100644
new mode 100755
index f6b2642d..c6606d0b
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/utils/__init__.py
+++ b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/past/utils/__init__.py
@@ -16,7 +16,7 @@
import sys
import numbers
-PY3 = sys.version_info[0] >= 3
+PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/__init__.py
deleted file mode 100644
index d59226af..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/__init__.py
+++ /dev/null
@@ -1,3296 +0,0 @@
-"""
-Package resource API
---------------------
-
-A resource is a logical file contained within a package, or a logical
-subdirectory thereof. The package resource API expects resource names
-to have their path parts separated with ``/``, *not* whatever the local
-path separator is. Do not use os.path operations to manipulate resource
-names being passed into the API.
-
-The package resource API is designed to work with normal filesystem packages,
-.egg files, and unpacked .egg files. It can also work in a limited way with
-.zip files and with custom PEP 302 loaders that support the ``get_data()``
-method.
-"""
-
-import sys
-import os
-import io
-import time
-import re
-import types
-import zipfile
-import zipimport
-import warnings
-import stat
-import functools
-import pkgutil
-import operator
-import platform
-import collections
-import plistlib
-import email.parser
-import errno
-import tempfile
-import textwrap
-import itertools
-import inspect
-import ntpath
-import posixpath
-import importlib
-from pkgutil import get_importer
-
-try:
- import _imp
-except ImportError:
- # Python 3.2 compatibility
- import imp as _imp
-
-try:
- FileExistsError
-except NameError:
- FileExistsError = OSError
-
-# capture these to bypass sandboxing
-from os import utime
-try:
- from os import mkdir, rename, unlink
- WRITE_SUPPORT = True
-except ImportError:
- # no write support, probably under GAE
- WRITE_SUPPORT = False
-
-from os import open as os_open
-from os.path import isdir, split
-
-try:
- import importlib.machinery as importlib_machinery
- # access attribute to force import under delayed import mechanisms.
- importlib_machinery.__name__
-except ImportError:
- importlib_machinery = None
-
-from pkg_resources.extern.jaraco.text import (
- yield_lines,
- drop_comment,
- join_continuation,
-)
-
-from pkg_resources.extern import appdirs
-from pkg_resources.extern import packaging
-__import__('pkg_resources.extern.packaging.version')
-__import__('pkg_resources.extern.packaging.specifiers')
-__import__('pkg_resources.extern.packaging.requirements')
-__import__('pkg_resources.extern.packaging.markers')
-__import__('pkg_resources.extern.packaging.utils')
-
-if sys.version_info < (3, 5):
- raise RuntimeError("Python 3.5 or later is required")
-
-# declare some globals that will be defined later to
-# satisfy the linters.
-require = None
-working_set = None
-add_activation_listener = None
-resources_stream = None
-cleanup_resources = None
-resource_dir = None
-resource_stream = None
-set_extraction_path = None
-resource_isdir = None
-resource_string = None
-iter_entry_points = None
-resource_listdir = None
-resource_filename = None
-resource_exists = None
-_distribution_finders = None
-_namespace_handlers = None
-_namespace_packages = None
-
-
-class PEP440Warning(RuntimeWarning):
- """
- Used when there is an issue with a version or specifier not complying with
- PEP 440.
- """
-
-
-def parse_version(v):
- try:
- return packaging.version.Version(v)
- except packaging.version.InvalidVersion:
- warnings.warn(
- f"{v} is an invalid version and will not be supported in "
- "a future release",
- PkgResourcesDeprecationWarning,
- )
- return packaging.version.LegacyVersion(v)
-
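``parse_version`` returned PEP 440 ``Version`` objects, which compare numerically rather than lexically; a doctest-style reminder of why callers relied on it (illustrative only):

    >>> parse_version("1.9") < parse_version("1.10")
    True
    >>> "1.9" < "1.10"   # plain string comparison gets this wrong
    False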
-
-_state_vars = {}
-
-
-def _declare_state(vartype, **kw):
- globals().update(kw)
- _state_vars.update(dict.fromkeys(kw, vartype))
-
-
-def __getstate__():
- state = {}
- g = globals()
- for k, v in _state_vars.items():
- state[k] = g['_sget_' + v](g[k])
- return state
-
-
-def __setstate__(state):
- g = globals()
- for k, v in state.items():
- g['_sset_' + _state_vars[k]](k, g[k], v)
- return state
-
-
-def _sget_dict(val):
- return val.copy()
-
-
-def _sset_dict(key, ob, state):
- ob.clear()
- ob.update(state)
-
-
-def _sget_object(val):
- return val.__getstate__()
-
-
-def _sset_object(key, ob, state):
- ob.__setstate__(state)
-
-
-_sget_none = _sset_none = lambda *args: None
-
-
-def get_supported_platform():
- """Return this platform's maximum compatible version.
-
- distutils.util.get_platform() normally reports the minimum version
- of macOS that would be required to *use* extensions produced by
- distutils. But what we want when checking compatibility is to know the
- version of macOS that we are *running*. To allow usage of packages that
- explicitly require a newer version of macOS, we must also know the
- current version of the OS.
-
- If this condition occurs for any other platform with a version in its
- platform strings, this function should be extended accordingly.
- """
- plat = get_build_platform()
- m = macosVersionString.match(plat)
- if m is not None and sys.platform == "darwin":
- try:
- plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
- except ValueError:
- # not macOS
- pass
- return plat
-
-
-__all__ = [
- # Basic resource access and distribution/entry point discovery
- 'require', 'run_script', 'get_provider', 'get_distribution',
- 'load_entry_point', 'get_entry_map', 'get_entry_info',
- 'iter_entry_points',
- 'resource_string', 'resource_stream', 'resource_filename',
- 'resource_listdir', 'resource_exists', 'resource_isdir',
-
- # Environmental control
- 'declare_namespace', 'working_set', 'add_activation_listener',
- 'find_distributions', 'set_extraction_path', 'cleanup_resources',
- 'get_default_cache',
-
- # Primary implementation classes
- 'Environment', 'WorkingSet', 'ResourceManager',
- 'Distribution', 'Requirement', 'EntryPoint',
-
- # Exceptions
- 'ResolutionError', 'VersionConflict', 'DistributionNotFound',
- 'UnknownExtra', 'ExtractionError',
-
- # Warnings
- 'PEP440Warning',
-
- # Parsing functions and string utilities
- 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
- 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
- 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
-
- # filesystem utilities
- 'ensure_directory', 'normalize_path',
-
- # Distribution "precedence" constants
- 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
-
- # "Provider" interfaces, implementations, and registration/lookup APIs
- 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
- 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
- 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
- 'register_finder', 'register_namespace_handler', 'register_loader_type',
- 'fixup_namespace_packages', 'get_importer',
-
- # Warnings
- 'PkgResourcesDeprecationWarning',
-
- # Deprecated/backward compatibility only
- 'run_main', 'AvailableDistributions',
-]
-
-
-class ResolutionError(Exception):
- """Abstract base for dependency resolution errors"""
-
- def __repr__(self):
- return self.__class__.__name__ + repr(self.args)
-
-
-class VersionConflict(ResolutionError):
- """
- An already-installed version conflicts with the requested version.
-
- Should be initialized with the installed Distribution and the requested
- Requirement.
- """
-
- _template = "{self.dist} is installed but {self.req} is required"
-
- @property
- def dist(self):
- return self.args[0]
-
- @property
- def req(self):
- return self.args[1]
-
- def report(self):
- return self._template.format(**locals())
-
- def with_context(self, required_by):
- """
- If required_by is non-empty, return a version of self that is a
- ContextualVersionConflict.
- """
- if not required_by:
- return self
- args = self.args + (required_by,)
- return ContextualVersionConflict(*args)
-
-
-class ContextualVersionConflict(VersionConflict):
- """
- A VersionConflict that accepts a third parameter, the set of the
- requirements that required the installed Distribution.
- """
-
- _template = VersionConflict._template + ' by {self.required_by}'
-
- @property
- def required_by(self):
- return self.args[2]
-
-
-class DistributionNotFound(ResolutionError):
- """A requested distribution was not found"""
-
- _template = ("The '{self.req}' distribution was not found "
- "and is required by {self.requirers_str}")
-
- @property
- def req(self):
- return self.args[0]
-
- @property
- def requirers(self):
- return self.args[1]
-
- @property
- def requirers_str(self):
- if not self.requirers:
- return 'the application'
- return ', '.join(self.requirers)
-
- def report(self):
- return self._template.format(**locals())
-
- def __str__(self):
- return self.report()
-
-
-class UnknownExtra(ResolutionError):
- """Distribution doesn't have an "extra feature" of the given name"""
-
-
-_provider_factories = {}
-
-PY_MAJOR = '{}.{}'.format(*sys.version_info)
-EGG_DIST = 3
-BINARY_DIST = 2
-SOURCE_DIST = 1
-CHECKOUT_DIST = 0
-DEVELOP_DIST = -1
-
-
-def register_loader_type(loader_type, provider_factory):
- """Register `provider_factory` to make providers for `loader_type`
-
- `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
- and `provider_factory` is a function that, passed a *module* object,
- returns an ``IResourceProvider`` for that module.
- """
- _provider_factories[loader_type] = provider_factory
-
-
-def get_provider(moduleOrReq):
- """Return an IResourceProvider for the named module or requirement"""
- if isinstance(moduleOrReq, Requirement):
- return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
- try:
- module = sys.modules[moduleOrReq]
- except KeyError:
- __import__(moduleOrReq)
- module = sys.modules[moduleOrReq]
- loader = getattr(module, '__loader__', None)
- return _find_adapter(_provider_factories, loader)(module)
-
-
-def _macos_vers(_cache=[]):
- if not _cache:
- version = platform.mac_ver()[0]
- # fallback for MacPorts
- if version == '':
- plist = '/System/Library/CoreServices/SystemVersion.plist'
- if os.path.exists(plist):
- if hasattr(plistlib, 'readPlist'):
- plist_content = plistlib.readPlist(plist)
- if 'ProductVersion' in plist_content:
- version = plist_content['ProductVersion']
-
- _cache.append(version.split('.'))
- return _cache[0]
-
-
-def _macos_arch(machine):
- return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
-
-
-def get_build_platform():
- """Return this platform's string for platform-specific distributions
-
- XXX Currently this is the same as ``distutils.util.get_platform()``, but it
- needs some hacks for Linux and macOS.
- """
- from sysconfig import get_platform
-
- plat = get_platform()
- if sys.platform == "darwin" and not plat.startswith('macosx-'):
- try:
- version = _macos_vers()
- machine = os.uname()[4].replace(" ", "_")
- return "macosx-%d.%d-%s" % (
- int(version[0]), int(version[1]),
- _macos_arch(machine),
- )
- except ValueError:
- # if someone is running a non-Mac darwin system, this will fall
- # through to the default implementation
- pass
- return plat
-
-
-macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
-darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
-# XXX backward compat
-get_platform = get_build_platform
-
-
-def compatible_platforms(provided, required):
- """Can code for the `provided` platform run on the `required` platform?
-
- Returns true if either platform is ``None``, or the platforms are equal.
-
- XXX Needs compatibility checks for Linux and other unixy OSes.
- """
- if provided is None or required is None or provided == required:
- # easy case
- return True
-
- # macOS special cases
- reqMac = macosVersionString.match(required)
- if reqMac:
- provMac = macosVersionString.match(provided)
-
- # is this a Mac package?
- if not provMac:
- # this is backwards compatibility for packages built before
- # setuptools 0.6. All packages built after this point will
- # use the new macOS designation.
- provDarwin = darwinVersionString.match(provided)
- if provDarwin:
- dversion = int(provDarwin.group(1))
- macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
- if dversion == 7 and macosversion >= "10.3" or \
- dversion == 8 and macosversion >= "10.4":
- return True
- # egg isn't macOS or legacy darwin
- return False
-
- # are they the same major version and machine type?
- if provMac.group(1) != reqMac.group(1) or \
- provMac.group(3) != reqMac.group(3):
- return False
-
- # is the required OS major update >= the provided one?
- if int(provMac.group(2)) > int(reqMac.group(2)):
- return False
-
- return True
-
- # XXX Linux and other platforms' special cases should go here
- return False
-
-
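The deleted helper answered "can an egg built for the `provided` platform run on the `required` one?", with macOS handled version-aware; for example (illustrative only):

    >>> compatible_platforms('macosx-10.3-ppc', 'macosx-10.4-ppc')
    True
    >>> compatible_platforms('macosx-10.5-ppc', 'macosx-10.4-ppc')
    False
    >>> compatible_platforms(None, 'linux-x86_64')
    True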
-def run_script(dist_spec, script_name):
- """Locate distribution `dist_spec` and run its `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- require(dist_spec)[0].run_script(script_name, ns)
-
-
-# backward compatibility
-run_main = run_script
-
-
-def get_distribution(dist):
- """Return a current distribution object for a Requirement or string"""
- if isinstance(dist, str):
- dist = Requirement.parse(dist)
- if isinstance(dist, Requirement):
- dist = get_provider(dist)
- if not isinstance(dist, Distribution):
- raise TypeError("Expected string, Requirement, or Distribution", dist)
- return dist
-
-
-def load_entry_point(dist, group, name):
- """Return `name` entry point of `group` for `dist` or raise ImportError"""
- return get_distribution(dist).load_entry_point(group, name)
-
-
-def get_entry_map(dist, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- return get_distribution(dist).get_entry_map(group)
-
-
-def get_entry_info(dist, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return get_distribution(dist).get_entry_info(group, name)
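These three deleted helpers are what the console-script shims generated by older setuptools called at startup; the typical pattern (distribution and entry-point names hypothetical):

    import sys
    # resolve the 'example-tool' console_scripts entry point of example-dist
    main = load_entry_point('example-dist==1.0', 'console_scripts', 'example-tool')
    sys.exit(main())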
-
-
-class IMetadataProvider:
- def has_metadata(name):
- """Does the package's distribution contain the named metadata?"""
-
- def get_metadata(name):
- """The named metadata resource as a string"""
-
- def get_metadata_lines(name):
- """Yield named metadata resource as list of non-blank non-comment lines
-
- Leading and trailing whitespace is stripped from each line, and lines
- with ``#`` as the first non-blank character are omitted."""
-
- def metadata_isdir(name):
- """Is the named metadata a directory? (like ``os.path.isdir()``)"""
-
- def metadata_listdir(name):
- """List of metadata names in the directory (like ``os.listdir()``)"""
-
- def run_script(script_name, namespace):
- """Execute the named script in the supplied namespace dictionary"""
-
-
-class IResourceProvider(IMetadataProvider):
- """An object that provides access to package resources"""
-
- def get_resource_filename(manager, resource_name):
- """Return a true filesystem path for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_stream(manager, resource_name):
- """Return a readable file-like object for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_string(manager, resource_name):
- """Return a string containing the contents of `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def has_resource(resource_name):
- """Does the package contain the named resource?"""
-
- def resource_isdir(resource_name):
- """Is the named resource a directory? (like ``os.path.isdir()``)"""
-
- def resource_listdir(resource_name):
- """List of resource names in the directory (like ``os.listdir()``)"""
-
-
-class WorkingSet:
- """A collection of active distributions on sys.path (or a similar list)"""
-
- def __init__(self, entries=None):
- """Create working set from list of path entries (default=sys.path)"""
- self.entries = []
- self.entry_keys = {}
- self.by_key = {}
- self.normalized_to_canonical_keys = {}
- self.callbacks = []
-
- if entries is None:
- entries = sys.path
-
- for entry in entries:
- self.add_entry(entry)
-
- @classmethod
- def _build_master(cls):
- """
- Prepare the master working set.
- """
- ws = cls()
- try:
- from __main__ import __requires__
- except ImportError:
- # The main program does not list any requirements
- return ws
-
- # ensure the requirements are met
- try:
- ws.require(__requires__)
- except VersionConflict:
- return cls._build_from_requirements(__requires__)
-
- return ws
-
- @classmethod
- def _build_from_requirements(cls, req_spec):
- """
- Build a working set from a requirement spec. Rewrites sys.path.
- """
- # try it without defaults already on sys.path
- # by starting with an empty path
- ws = cls([])
- reqs = parse_requirements(req_spec)
- dists = ws.resolve(reqs, Environment())
- for dist in dists:
- ws.add(dist)
-
- # add any missing entries from sys.path
- for entry in sys.path:
- if entry not in ws.entries:
- ws.add_entry(entry)
-
- # then copy back to sys.path
- sys.path[:] = ws.entries
- return ws
-
- def add_entry(self, entry):
- """Add a path item to ``.entries``, finding any distributions on it
-
- ``find_distributions(entry, True)`` is used to find distributions
- corresponding to the path entry, and they are added. `entry` is
- always appended to ``.entries``, even if it is already present.
- (This is because ``sys.path`` can contain the same value more than
- once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
- equal ``sys.path``.)
- """
- self.entry_keys.setdefault(entry, [])
- self.entries.append(entry)
- for dist in find_distributions(entry, True):
- self.add(dist, entry, False)
-
- def __contains__(self, dist):
- """True if `dist` is the active distribution for its project"""
- return self.by_key.get(dist.key) == dist
-
- def find(self, req):
- """Find a distribution matching requirement `req`
-
- If there is an active distribution for the requested project, this
- returns it as long as it meets the version requirement specified by
- `req`. But, if there is an active distribution for the project and it
- does *not* meet the `req` requirement, ``VersionConflict`` is raised.
- If there is no active distribution for the requested project, ``None``
- is returned.
- """
- dist = self.by_key.get(req.key)
-
- if dist is None:
- canonical_key = self.normalized_to_canonical_keys.get(req.key)
-
- if canonical_key is not None:
- req.key = canonical_key
- dist = self.by_key.get(canonical_key)
-
- if dist is not None and dist not in req:
- # XXX add more info
- raise VersionConflict(dist, req)
- return dist
-
- def iter_entry_points(self, group, name=None):
- """Yield entry point objects from `group` matching `name`
-
- If `name` is None, yields all entry points in `group` from all
- distributions in the working set, otherwise only ones matching
- both `group` and `name` are yielded (in distribution order).
- """
- return (
- entry
- for dist in self
- for entry in dist.get_entry_map(group).values()
- if name is None or name == entry.name
- )
-
- def run_script(self, requires, script_name):
- """Locate distribution for `requires` and run `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- self.require(requires)[0].run_script(script_name, ns)
-
- def __iter__(self):
- """Yield distributions for non-duplicate projects in the working set
-
- The yield order is the order in which the items' path entries were
- added to the working set.
- """
- seen = {}
- for item in self.entries:
- if item not in self.entry_keys:
- # workaround a cache issue
- continue
-
- for key in self.entry_keys[item]:
- if key not in seen:
- seen[key] = 1
- yield self.by_key[key]
-
- def add(self, dist, entry=None, insert=True, replace=False):
- """Add `dist` to working set, associated with `entry`
-
- If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
- On exit from this routine, `entry` is added to the end of the working
- set's ``.entries`` (if it wasn't already present).
-
- `dist` is only added to the working set if it's for a project that
- doesn't already have a distribution in the set, unless `replace=True`.
- If it's added, any callbacks registered with the ``subscribe()`` method
- will be called.
- """
- if insert:
- dist.insert_on(self.entries, entry, replace=replace)
-
- if entry is None:
- entry = dist.location
- keys = self.entry_keys.setdefault(entry, [])
- keys2 = self.entry_keys.setdefault(dist.location, [])
- if not replace and dist.key in self.by_key:
- # ignore hidden distros
- return
-
- self.by_key[dist.key] = dist
- normalized_name = packaging.utils.canonicalize_name(dist.key)
- self.normalized_to_canonical_keys[normalized_name] = dist.key
- if dist.key not in keys:
- keys.append(dist.key)
- if dist.key not in keys2:
- keys2.append(dist.key)
- self._added_new(dist)
-
- # FIXME: 'WorkingSet.resolve' is too complex (11)
- def resolve(self, requirements, env=None, installer=None, # noqa: C901
- replace_conflicting=False, extras=None):
- """List all distributions needed to (recursively) meet `requirements`
-
- `requirements` must be a sequence of ``Requirement`` objects. `env`,
- if supplied, should be an ``Environment`` instance. If
- not supplied, it defaults to all distributions available within any
- entry or distribution in the working set. `installer`, if supplied,
- will be invoked with each requirement that cannot be met by an
- already-installed distribution; it should return a ``Distribution`` or
- ``None``.
-
- Unless `replace_conflicting=True`, raises a VersionConflict exception
- if
- any requirements are found on the path that have the correct name but
- the wrong version. Otherwise, if an `installer` is supplied it will be
- invoked to obtain the correct version of the requirement and activate
- it.
-
- `extras` is a list of the extras to be used with these requirements.
- This is important because extra requirements may look like `my_req;
- extra = "my_extra"`, which would otherwise be interpreted as a purely
- optional requirement. Instead, we want to be able to assert that these
- requirements are truly required.
- """
-
- # set up the stack
- requirements = list(requirements)[::-1]
- # set of processed requirements
- processed = {}
- # key -> dist
- best = {}
- to_activate = []
-
- req_extras = _ReqExtras()
-
- # Mapping of requirement to set of distributions that required it;
- # useful for reporting info about conflicts.
- required_by = collections.defaultdict(set)
-
- while requirements:
- # process dependencies breadth-first
- req = requirements.pop(0)
- if req in processed:
- # Ignore cyclic or redundant dependencies
- continue
-
- if not req_extras.markers_pass(req, extras):
- continue
-
- dist = best.get(req.key)
- if dist is None:
- # Find the best distribution and add it to the map
- dist = self.by_key.get(req.key)
- if dist is None or (dist not in req and replace_conflicting):
- ws = self
- if env is None:
- if dist is None:
- env = Environment(self.entries)
- else:
- # Use an empty environment and workingset to avoid
- # any further conflicts with the conflicting
- # distribution
- env = Environment([])
- ws = WorkingSet([])
- dist = best[req.key] = env.best_match(
- req, ws, installer,
- replace_conflicting=replace_conflicting
- )
- if dist is None:
- requirers = required_by.get(req, None)
- raise DistributionNotFound(req, requirers)
- to_activate.append(dist)
- if dist not in req:
- # Oops, the "best" so far conflicts with a dependency
- dependent_req = required_by[req]
- raise VersionConflict(dist, req).with_context(dependent_req)
-
- # push the new requirements onto the stack
- new_requirements = dist.requires(req.extras)[::-1]
- requirements.extend(new_requirements)
-
- # Register the new requirements needed by req
- for new_requirement in new_requirements:
- required_by[new_requirement].add(req.project_name)
- req_extras[new_requirement] = req.extras
-
- processed[req] = True
-
- # return list of distros to activate
- return to_activate
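-
- # A minimal usage sketch for resolve(); the requirement string and
- # project name are hypothetical:
- #
- #     from pkg_resources import working_set, parse_requirements
- #     for dist in working_set.resolve(parse_requirements('somepkg>=1.0')):
- #         working_set.add(dist)   # activate each resolved distribution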
-
- def find_plugins(
- self, plugin_env, full_env=None, installer=None, fallback=True):
- """Find all activatable distributions in `plugin_env`
-
- Example usage::
-
- distributions, errors = working_set.find_plugins(
- Environment(plugin_dirlist)
- )
- # add plugins+libs to sys.path
- map(working_set.add, distributions)
- # display errors
- print('Could not load', errors)
-
- The `plugin_env` should be an ``Environment`` instance that contains
- only distributions that are in the project's "plugin directory" or
- directories. The `full_env`, if supplied, should be an ``Environment``
- that contains all currently-available distributions. If `full_env` is not
- supplied, one is created automatically from the ``WorkingSet`` this
- method is called on, which will typically mean that every directory on
- ``sys.path`` will be scanned for distributions.
-
- `installer` is a standard installer callback as used by the
- ``resolve()`` method. The `fallback` flag indicates whether we should
- attempt to resolve older versions of a plugin if the newest version
- cannot be resolved.
-
- This method returns a 2-tuple: (`distributions`, `error_info`), where
- `distributions` is a list of the distributions found in `plugin_env`
- that were loadable, along with any other distributions that are needed
- to resolve their dependencies. `error_info` is a dictionary mapping
- unloadable plugin distributions to an exception instance describing the
- error that occurred. Usually this will be a ``DistributionNotFound`` or
- ``VersionConflict`` instance.
- """
-
- plugin_projects = list(plugin_env)
- # scan project names in alphabetic order
- plugin_projects.sort()
-
- error_info = {}
- distributions = {}
-
- if full_env is None:
- env = Environment(self.entries)
- env += plugin_env
- else:
- env = full_env + plugin_env
-
- shadow_set = self.__class__([])
- # put all our entries in shadow_set
- list(map(shadow_set.add, self))
-
- for project_name in plugin_projects:
-
- for dist in plugin_env[project_name]:
-
- req = [dist.as_requirement()]
-
- try:
- resolvees = shadow_set.resolve(req, env, installer)
-
- except ResolutionError as v:
- # save error info
- error_info[dist] = v
- if fallback:
- # try the next older version of project
- continue
- else:
- # give up on this project, but keep processing the others
- break
-
- else:
- list(map(shadow_set.add, resolvees))
- distributions.update(dict.fromkeys(resolvees))
-
- # success, no need to try any more versions of this project
- break
-
- distributions = list(distributions)
- distributions.sort()
-
- return distributions, error_info
-
- def require(self, *requirements):
- """Ensure that distributions matching `requirements` are activated
-
- `requirements` must be a string or a (possibly-nested) sequence
- thereof, specifying the distributions and versions required. The
- return value is a sequence of the distributions that needed to be
- activated to fulfill the requirements; all relevant distributions are
- included, even if they were already activated in this working set.
- """
- needed = self.resolve(parse_requirements(requirements))
-
- for dist in needed:
- self.add(dist)
-
- return needed
-
- def subscribe(self, callback, existing=True):
- """Invoke `callback` for all distributions
-
- If `existing=True` (default),
- call on all existing ones, as well.
- """
- if callback in self.callbacks:
- return
- self.callbacks.append(callback)
- if not existing:
- return
- for dist in self:
- callback(dist)
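-
- # Sketch: a subscriber is invoked once per distribution, e.g.
- #
- #     def _announce(dist):              # hypothetical callback
- #         print('activated', dist)
- #     working_set.subscribe(_announce)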
-
- def _added_new(self, dist):
- for callback in self.callbacks:
- callback(dist)
-
- def __getstate__(self):
- return (
- self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
- self.normalized_to_canonical_keys.copy(), self.callbacks[:]
- )
-
- def __setstate__(self, e_k_b_n_c):
- entries, keys, by_key, normalized_to_canonical_keys, callbacks = e_k_b_n_c
- self.entries = entries[:]
- self.entry_keys = keys.copy()
- self.by_key = by_key.copy()
- self.normalized_to_canonical_keys = normalized_to_canonical_keys.copy()
- self.callbacks = callbacks[:]
-
-
-class _ReqExtras(dict):
- """
- Map each requirement to the extras that demanded it.
- """
-
- def markers_pass(self, req, extras=None):
- """
- Evaluate markers for req against each extra that
- demanded it.
-
- Return False if the req has a marker and fails
- evaluation. Otherwise, return True.
- """
- extra_evals = (
- req.marker.evaluate({'extra': extra})
- for extra in self.get(req, ()) + (extras or (None,))
- )
- return not req.marker or any(extra_evals)
-
-
-class Environment:
- """Searchable snapshot of distributions on a search path"""
-
- def __init__(
- self, search_path=None, platform=get_supported_platform(),
- python=PY_MAJOR):
- """Snapshot distributions available on a search path
-
- Any distributions found on `search_path` are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used.
-
- `platform` is an optional string specifying the name of the platform
- that platform-specific distributions must be compatible with. If
- unspecified, it defaults to the current platform. `python` is an
- optional string naming the desired version of Python (e.g. ``'3.6'``);
- it defaults to the current version.
-
- You may explicitly set `platform` (and/or `python`) to ``None`` if you
- wish to map *all* distributions, not just those compatible with the
- running platform or Python version.
- """
- self._distmap = {}
- self.platform = platform
- self.python = python
- self.scan(search_path)
-
- def can_add(self, dist):
- """Is distribution `dist` acceptable for this environment?
-
- The distribution must match the platform and python version
- requirements specified when this environment was created, or False
- is returned.
- """
- py_compat = (
- self.python is None
- or dist.py_version is None
- or dist.py_version == self.python
- )
- return py_compat and compatible_platforms(dist.platform, self.platform)
-
- def remove(self, dist):
- """Remove `dist` from the environment"""
- self._distmap[dist.key].remove(dist)
-
- def scan(self, search_path=None):
- """Scan `search_path` for distributions usable in this environment
-
- Any distributions found are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used. Only distributions conforming to
- the platform/python version defined at initialization are added.
- """
- if search_path is None:
- search_path = sys.path
-
- for item in search_path:
- for dist in find_distributions(item):
- self.add(dist)
-
- def __getitem__(self, project_name):
- """Return a newest-to-oldest list of distributions for `project_name`
-
- Uses case-insensitive `project_name` comparison, assuming all the
- project's distributions use their project's name converted to all
- lowercase as their key.
-
- """
- distribution_key = project_name.lower()
- return self._distmap.get(distribution_key, [])
-
- def add(self, dist):
- """Add `dist` if we ``can_add()`` it and it has not already been added
- """
- if self.can_add(dist) and dist.has_version():
- dists = self._distmap.setdefault(dist.key, [])
- if dist not in dists:
- dists.append(dist)
- dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
-
- def best_match(
- self, req, working_set, installer=None, replace_conflicting=False):
- """Find distribution best matching `req` and usable on `working_set`
-
- This calls the ``find(req)`` method of the `working_set` to see if a
- suitable distribution is already active. (This may raise
- ``VersionConflict`` if an unsuitable version of the project is already
- active in the specified `working_set`.) If a suitable distribution
- isn't active, this method returns the newest distribution in the
- environment that meets the ``Requirement`` in `req`. If no suitable
- distribution is found, and `installer` is supplied, then the result of
- calling the environment's ``obtain(req, installer)`` method will be
- returned.
- """
- try:
- dist = working_set.find(req)
- except VersionConflict:
- if not replace_conflicting:
- raise
- dist = None
- if dist is not None:
- return dist
- for dist in self[req.key]:
- if dist in req:
- return dist
- # try to download/install
- return self.obtain(req, installer)
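-
- # Sketch of the search order implemented above: an already-active dist
- # in the working set wins, then the newest compatible dist in this
- # environment, then obtain(). The requirement below is hypothetical:
- #
- #     env = Environment()                       # snapshot of sys.path
- #     req = Requirement.parse('somepkg>=1.0')
- #     dist = env.best_match(req, working_set)   # may be None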
-
- def obtain(self, requirement, installer=None):
- """Obtain a distribution matching `requirement` (e.g. via download)
-
- Obtain a distro that matches requirement (e.g. via download). In the
- base ``Environment`` class, this routine just returns
- ``installer(requirement)``, unless `installer` is None, in which case
- None is returned instead. This method is a hook that allows subclasses
- to attempt other ways of obtaining a distribution before falling back
- to the `installer` argument."""
- if installer is not None:
- return installer(requirement)
-
- def __iter__(self):
- """Yield the unique project names of the available distributions"""
- for key in self._distmap.keys():
- if self[key]:
- yield key
-
- def __iadd__(self, other):
- """In-place addition of a distribution or environment"""
- if isinstance(other, Distribution):
- self.add(other)
- elif isinstance(other, Environment):
- for project in other:
- for dist in other[project]:
- self.add(dist)
- else:
- raise TypeError("Can't add %r to environment" % (other,))
- return self
-
- def __add__(self, other):
- """Add an environment or distribution to an environment"""
- new = self.__class__([], platform=None, python=None)
- for env in self, other:
- new += env
- return new
-
-
-# XXX backward compatibility
-AvailableDistributions = Environment
-
-
-class ExtractionError(RuntimeError):
- """An error occurred extracting a resource
-
- The following attributes are available from instances of this exception:
-
- manager
- The resource manager that raised this exception
-
- cache_path
- The base directory for resource extraction
-
- original_error
- The exception instance that caused extraction to fail
- """
-
-
-class ResourceManager:
- """Manage resource extraction and packages"""
- extraction_path = None
-
- def __init__(self):
- self.cached_files = {}
-
- def resource_exists(self, package_or_requirement, resource_name):
- """Does the named resource exist?"""
- return get_provider(package_or_requirement).has_resource(resource_name)
-
- def resource_isdir(self, package_or_requirement, resource_name):
- """Is the named resource an existing directory?"""
- return get_provider(package_or_requirement).resource_isdir(
- resource_name
- )
-
- def resource_filename(self, package_or_requirement, resource_name):
- """Return a true filesystem path for specified resource"""
- return get_provider(package_or_requirement).get_resource_filename(
- self, resource_name
- )
-
- def resource_stream(self, package_or_requirement, resource_name):
- """Return a readable file-like object for specified resource"""
- return get_provider(package_or_requirement).get_resource_stream(
- self, resource_name
- )
-
- def resource_string(self, package_or_requirement, resource_name):
- """Return specified resource as a string"""
- return get_provider(package_or_requirement).get_resource_string(
- self, resource_name
- )
-
- def resource_listdir(self, package_or_requirement, resource_name):
- """List the contents of the named resource directory"""
- return get_provider(package_or_requirement).resource_listdir(
- resource_name
- )
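-
- # Sketch of the resource-access API above; the package and resource
- # names are hypothetical:
- #
- #     mgr = ResourceManager()
- #     if mgr.resource_exists('mypkg', 'data/config.ini'):
- #         raw = mgr.resource_string('mypkg', 'data/config.ini')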
-
- def extraction_error(self):
- """Give an error message for problems extracting file(s)"""
-
- old_exc = sys.exc_info()[1]
- cache_path = self.extraction_path or get_default_cache()
-
- tmpl = textwrap.dedent("""
- Can't extract file(s) to egg cache
-
- The following error occurred while trying to extract file(s)
- to the Python egg cache:
-
- {old_exc}
-
- The Python egg cache directory is currently set to:
-
- {cache_path}
-
- Perhaps your account does not have write access to this directory?
- You can change the cache directory by setting the PYTHON_EGG_CACHE
- environment variable to point to an accessible directory.
- """).lstrip()
- err = ExtractionError(tmpl.format(**locals()))
- err.manager = self
- err.cache_path = cache_path
- err.original_error = old_exc
- raise err
-
- def get_cache_path(self, archive_name, names=()):
- """Return absolute location in cache for `archive_name` and `names`
-
- The parent directory of the resulting path will be created if it does
- not already exist. `archive_name` should be the base filename of the
- enclosing egg (which may not be the name of the enclosing zipfile!),
- including its ".egg" extension. `names`, if provided, should be a
- sequence of path name parts "under" the egg's extraction location.
-
- This method should only be called by resource providers that need to
- obtain an extraction location, and only for names they intend to
- extract, as it tracks the generated names for possible cleanup later.
- """
- extract_path = self.extraction_path or get_default_cache()
- target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
- try:
- _bypass_ensure_directory(target_path)
- except Exception:
- self.extraction_error()
-
- self._warn_unsafe_extraction_path(extract_path)
-
- self.cached_files[target_path] = 1
- return target_path
-
- @staticmethod
- def _warn_unsafe_extraction_path(path):
- """
- If the default extraction path is overridden and set to an insecure
- location, such as /tmp, it opens up an opportunity for an attacker to
- replace an extracted file with an unauthorized payload. Warn the user
- if a known insecure location is used.
-
- See Distribute #375 for more details.
- """
- if os.name == 'nt' and not path.startswith(os.environ['windir']):
- # On Windows, permissions are generally restrictive by default
- # and temp directories are not writable by other users, so
- # bypass the warning.
- return
- mode = os.stat(path).st_mode
- if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
- msg = (
- "Extraction path is writable by group/others "
- "and vulnerable to attack when "
- "used with get_resource_filename ({path}). "
- "Consider a more secure "
- "location (set with .set_extraction_path or the "
- "PYTHON_EGG_CACHE environment variable)."
- ).format(**locals())
- warnings.warn(msg, UserWarning)
-
- def postprocess(self, tempname, filename):
- """Perform any platform-specific postprocessing of `tempname`
-
- This is where Mac header rewrites should be done; other platforms don't
- have anything special they should do.
-
- Resource providers should call this method ONLY after successfully
- extracting a compressed resource. They must NOT call it on resources
- that are already in the filesystem.
-
- `tempname` is the current (temporary) name of the file, and `filename`
- is the name it will be renamed to by the caller after this routine
- returns.
- """
-
- if os.name == 'posix':
- # Make the resource executable
- mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
- os.chmod(tempname, mode)
-
- def set_extraction_path(self, path):
- """Set the base path where resources will be extracted to, if needed.
-
- If you do not call this routine before any extractions take place, the
- path defaults to the return value of ``get_default_cache()``. (Which
- is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
- platform-specific fallbacks. See that routine's documentation for more
- details.)
-
- Resources are extracted to subdirectories of this path based upon
- information given by the ``IResourceProvider``. You may set this to a
- temporary directory, but then you must call ``cleanup_resources()`` to
- delete the extracted files when done. There is no guarantee that
- ``cleanup_resources()`` will be able to remove all extracted files.
-
- (Note: you may not change the extraction path for a given resource
- manager once resources have been extracted, unless you first call
- ``cleanup_resources()``.)
- """
- if self.cached_files:
- raise ValueError(
- "Can't change extraction path, files already extracted"
- )
-
- self.extraction_path = path
-
- def cleanup_resources(self, force=False):
- """
- Delete all extracted resource files and directories, returning a list
- of the file and directory names that could not be successfully removed.
- This function does not have any concurrency protection, so it should
- generally only be called when the extraction path is a temporary
- directory exclusive to a single process. This method is not
- automatically called; you must call it explicitly or register it as an
- ``atexit`` function if you wish to ensure cleanup of a temporary
- directory used for extractions.
- """
- # XXX
-
-
-def get_default_cache():
- """
- Return the ``PYTHON_EGG_CACHE`` environment variable
- or a platform-relevant user cache dir for an app
- named "Python-Eggs".
- """
- return (
- os.environ.get('PYTHON_EGG_CACHE')
- or appdirs.user_cache_dir(appname='Python-Eggs')
- )
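-
- # Sketch: the cache location can be redirected via the environment
- # (the path below is an arbitrary example):
- #
- #     os.environ['PYTHON_EGG_CACHE'] = '/var/cache/my-eggs'
- #     get_default_cache()   # -> '/var/cache/my-eggs'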
-
-
-def safe_name(name):
- """Convert an arbitrary string to a standard distribution name
-
- Any runs of non-alphanumeric/. characters are replaced with a single '-'.
- """
- return re.sub('[^A-Za-z0-9.]+', '-', name)
-
-
-def safe_version(version):
- """
- Convert an arbitrary string to a standard version string
- """
- try:
- # normalize the version
- return str(packaging.version.Version(version))
- except packaging.version.InvalidVersion:
- version = version.replace(' ', '.')
- return re.sub('[^A-Za-z0-9.]+', '-', version)
-
-
-def safe_extra(extra):
- """Convert an arbitrary string to a standard 'extra' name
-
- Any runs of non-alphanumeric characters are replaced with a single '_',
- and the result is always lowercased.
- """
- return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
-
-
-def to_filename(name):
- """Convert a project or version name to its filename-escaped form
-
- Any '-' characters are currently replaced with '_'.
- """
- return name.replace('-', '_')
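-
- # A few concrete examples of the normalizers above, derived directly
- # from the regexes they apply:
- #
- #     safe_name('hello_world')    # -> 'hello-world'
- #     safe_version('1.0 alpha')   # -> '1.0.alpha' (fallback path)
- #     safe_extra('Feature One')   # -> 'feature_one'
- #     to_filename('my-dist')      # -> 'my_dist'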
-
-
-def invalid_marker(text):
- """
- Validate text as a PEP 508 environment marker; return an exception
- if invalid or False otherwise.
- """
- try:
- evaluate_marker(text)
- except SyntaxError as e:
- e.filename = None
- e.lineno = None
- return e
- return False
-
-
-def evaluate_marker(text, extra=None):
- """
- Evaluate a PEP 508 environment marker.
- Return a boolean indicating the marker result in this environment.
- Raise SyntaxError if marker is invalid.
-
- This implementation uses the 'pyparsing' module.
- """
- try:
- marker = packaging.markers.Marker(text)
- return marker.evaluate()
- except packaging.markers.InvalidMarker as e:
- raise SyntaxError(e) from e
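-
- # Sketch: markers follow PEP 508, e.g.
- #
- #     evaluate_marker('python_version >= "3.6"')   # -> True or False
- #     invalid_marker('this is not a marker')       # -> SyntaxError instance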
-
-
-class NullProvider:
- """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
-
- egg_name = None
- egg_info = None
- loader = None
-
- def __init__(self, module):
- self.loader = getattr(module, '__loader__', None)
- self.module_path = os.path.dirname(getattr(module, '__file__', ''))
-
- def get_resource_filename(self, manager, resource_name):
- return self._fn(self.module_path, resource_name)
-
- def get_resource_stream(self, manager, resource_name):
- return io.BytesIO(self.get_resource_string(manager, resource_name))
-
- def get_resource_string(self, manager, resource_name):
- return self._get(self._fn(self.module_path, resource_name))
-
- def has_resource(self, resource_name):
- return self._has(self._fn(self.module_path, resource_name))
-
- def _get_metadata_path(self, name):
- return self._fn(self.egg_info, name)
-
- def has_metadata(self, name):
- if not self.egg_info:
- return self.egg_info
-
- path = self._get_metadata_path(name)
- return self._has(path)
-
- def get_metadata(self, name):
- if not self.egg_info:
- return ""
- path = self._get_metadata_path(name)
- value = self._get(path)
- try:
- return value.decode('utf-8')
- except UnicodeDecodeError as exc:
- # Include the path in the error message to simplify
- # troubleshooting, and without changing the exception type.
- exc.reason += ' in {} file at path: {}'.format(name, path)
- raise
-
- def get_metadata_lines(self, name):
- return yield_lines(self.get_metadata(name))
-
- def resource_isdir(self, resource_name):
- return self._isdir(self._fn(self.module_path, resource_name))
-
- def metadata_isdir(self, name):
- return self.egg_info and self._isdir(self._fn(self.egg_info, name))
-
- def resource_listdir(self, resource_name):
- return self._listdir(self._fn(self.module_path, resource_name))
-
- def metadata_listdir(self, name):
- if self.egg_info:
- return self._listdir(self._fn(self.egg_info, name))
- return []
-
- def run_script(self, script_name, namespace):
- script = 'scripts/' + script_name
- if not self.has_metadata(script):
- raise ResolutionError(
- "Script {script!r} not found in metadata at {self.egg_info!r}"
- .format(**locals()),
- )
- script_text = self.get_metadata(script).replace('\r\n', '\n')
- script_text = script_text.replace('\r', '\n')
- script_filename = self._fn(self.egg_info, script)
- namespace['__file__'] = script_filename
- if os.path.exists(script_filename):
- with open(script_filename) as fid:
- source = fid.read()
- code = compile(source, script_filename, 'exec')
- exec(code, namespace, namespace)
- else:
- from linecache import cache
- cache[script_filename] = (
- len(script_text), 0, script_text.split('\n'), script_filename
- )
- script_code = compile(script_text, script_filename, 'exec')
- exec(script_code, namespace, namespace)
-
- def _has(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _isdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _listdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _fn(self, base, resource_name):
- self._validate_resource_path(resource_name)
- if resource_name:
- return os.path.join(base, *resource_name.split('/'))
- return base
-
- @staticmethod
- def _validate_resource_path(path):
- """
- Validate the resource paths according to the docs.
- https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access
-
- >>> warned = getfixture('recwarn')
- >>> warnings.simplefilter('always')
- >>> vrp = NullProvider._validate_resource_path
- >>> vrp('foo/bar.txt')
- >>> bool(warned)
- False
- >>> vrp('../foo/bar.txt')
- >>> bool(warned)
- True
- >>> warned.clear()
- >>> vrp('/foo/bar.txt')
- >>> bool(warned)
- True
- >>> vrp('foo/../../bar.txt')
- >>> bool(warned)
- True
- >>> warned.clear()
- >>> vrp('foo/f../bar.txt')
- >>> bool(warned)
- False
-
- Windows path separators are straight-up disallowed.
- >>> vrp(r'\\foo/bar.txt')
- Traceback (most recent call last):
- ...
- ValueError: Use of .. or absolute path in a resource path \
-is not allowed.
-
- >>> vrp(r'C:\\foo/bar.txt')
- Traceback (most recent call last):
- ...
- ValueError: Use of .. or absolute path in a resource path \
-is not allowed.
-
- Blank values are allowed
-
- >>> vrp('')
- >>> bool(warned)
- False
-
- Non-string values are not.
-
- >>> vrp(None)
- Traceback (most recent call last):
- ...
- AttributeError: ...
- """
- invalid = (
- os.path.pardir in path.split(posixpath.sep) or
- posixpath.isabs(path) or
- ntpath.isabs(path)
- )
- if not invalid:
- return
-
- msg = "Use of .. or absolute path in a resource path is not allowed."
-
- # Aggressively disallow Windows absolute paths
- if ntpath.isabs(path) and not posixpath.isabs(path):
- raise ValueError(msg)
-
- # for compatibility, warn; in future
- # raise ValueError(msg)
- warnings.warn(
- msg[:-1] + " and will raise exceptions in a future release.",
- DeprecationWarning,
- stacklevel=4,
- )
-
- def _get(self, path):
- if hasattr(self.loader, 'get_data'):
- return self.loader.get_data(path)
- raise NotImplementedError(
- "Can't perform this operation for loaders without 'get_data()'"
- )
-
-
-register_loader_type(object, NullProvider)
-
-
-def _parents(path):
- """
- yield all parents of path including path
- """
- last = None
- while path != last:
- yield path
- last = path
- path, _ = os.path.split(path)
-
-
-class EggProvider(NullProvider):
- """Provider based on a virtual filesystem"""
-
- def __init__(self, module):
- super().__init__(module)
- self._setup_prefix()
-
- def _setup_prefix(self):
- # Assume that metadata may be nested inside a "basket"
- # of multiple eggs and use module_path instead of .archive.
- eggs = filter(_is_egg_path, _parents(self.module_path))
- egg = next(eggs, None)
- egg and self._set_egg(egg)
-
- def _set_egg(self, path):
- self.egg_name = os.path.basename(path)
- self.egg_info = os.path.join(path, 'EGG-INFO')
- self.egg_root = path
-
-
-class DefaultProvider(EggProvider):
- """Provides access to package resources in the filesystem"""
-
- def _has(self, path):
- return os.path.exists(path)
-
- def _isdir(self, path):
- return os.path.isdir(path)
-
- def _listdir(self, path):
- return os.listdir(path)
-
- def get_resource_stream(self, manager, resource_name):
- return open(self._fn(self.module_path, resource_name), 'rb')
-
- def _get(self, path):
- with open(path, 'rb') as stream:
- return stream.read()
-
- @classmethod
- def _register(cls):
- loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
- for name in loader_names:
- loader_cls = getattr(importlib_machinery, name, type(None))
- register_loader_type(loader_cls, cls)
-
-
-DefaultProvider._register()
-
-
-class EmptyProvider(NullProvider):
- """Provider that returns nothing for all requests"""
-
- module_path = None
-
- _isdir = _has = lambda self, path: False
-
- def _get(self, path):
- return ''
-
- def _listdir(self, path):
- return []
-
- def __init__(self):
- pass
-
-
-empty_provider = EmptyProvider()
-
-
-class ZipManifests(dict):
- """
- zip manifest builder
- """
-
- @classmethod
- def build(cls, path):
- """
- Build a dictionary similar to the zipimport directory
- caches, except instead of tuples, store ZipInfo objects.
-
- Use a platform-specific path separator (os.sep) for the path keys
- for compatibility with pypy on Windows.
- """
- with zipfile.ZipFile(path) as zfile:
- items = (
- (
- name.replace('/', os.sep),
- zfile.getinfo(name),
- )
- for name in zfile.namelist()
- )
- return dict(items)
-
- load = build
-
-
-class MemoizedZipManifests(ZipManifests):
- """
- Memoized zipfile manifests.
- """
- manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
-
- def load(self, path):
- """
- Load a manifest at path or return a suitable manifest already loaded.
- """
- path = os.path.normpath(path)
- mtime = os.stat(path).st_mtime
-
- if path not in self or self[path].mtime != mtime:
- manifest = self.build(path)
- self[path] = self.manifest_mod(manifest, mtime)
-
- return self[path].manifest
-
-
-class ZipProvider(EggProvider):
- """Resource support for zips and eggs"""
-
- eagers = None
- _zip_manifests = MemoizedZipManifests()
-
- def __init__(self, module):
- super().__init__(module)
- self.zip_pre = self.loader.archive + os.sep
-
- def _zipinfo_name(self, fspath):
- # Convert a virtual filename (full path to file) into a zipfile subpath
- # usable with the zipimport directory cache for our target archive
- fspath = fspath.rstrip(os.sep)
- if fspath == self.loader.archive:
- return ''
- if fspath.startswith(self.zip_pre):
- return fspath[len(self.zip_pre):]
- raise AssertionError(
- "%s is not a subpath of %s" % (fspath, self.zip_pre)
- )
-
- def _parts(self, zip_path):
- # Convert a zipfile subpath into an egg-relative path part list.
- # pseudo-fs path
- fspath = self.zip_pre + zip_path
- if fspath.startswith(self.egg_root + os.sep):
- return fspath[len(self.egg_root) + 1:].split(os.sep)
- raise AssertionError(
- "%s is not a subpath of %s" % (fspath, self.egg_root)
- )
-
- @property
- def zipinfo(self):
- return self._zip_manifests.load(self.loader.archive)
-
- def get_resource_filename(self, manager, resource_name):
- if not self.egg_name:
- raise NotImplementedError(
- "resource_filename() only supported for .egg, not .zip"
- )
- # no need to lock for extraction, since we use temp names
- zip_path = self._resource_to_zip(resource_name)
- eagers = self._get_eager_resources()
- if '/'.join(self._parts(zip_path)) in eagers:
- for name in eagers:
- self._extract_resource(manager, self._eager_to_zip(name))
- return self._extract_resource(manager, zip_path)
-
- @staticmethod
- def _get_date_and_size(zip_stat):
- size = zip_stat.file_size
- # ymdhms+wday, yday, dst
- date_time = zip_stat.date_time + (0, 0, -1)
- # 1980 offset already done
- timestamp = time.mktime(date_time)
- return timestamp, size
-
- # FIXME: 'ZipProvider._extract_resource' is too complex (12)
- def _extract_resource(self, manager, zip_path): # noqa: C901
-
- if zip_path in self._index():
- for name in self._index()[zip_path]:
- last = self._extract_resource(
- manager, os.path.join(zip_path, name)
- )
- # return the extracted directory name
- return os.path.dirname(last)
-
- timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
-
- if not WRITE_SUPPORT:
- raise IOError('"os.rename" and "os.unlink" are not supported '
- 'on this platform')
- try:
-
- real_path = manager.get_cache_path(
- self.egg_name, self._parts(zip_path)
- )
-
- if self._is_current(real_path, zip_path):
- return real_path
-
- outf, tmpnam = _mkstemp(
- ".$extract",
- dir=os.path.dirname(real_path),
- )
- os.write(outf, self.loader.get_data(zip_path))
- os.close(outf)
- utime(tmpnam, (timestamp, timestamp))
- manager.postprocess(tmpnam, real_path)
-
- try:
- rename(tmpnam, real_path)
-
- except os.error:
- if os.path.isfile(real_path):
- if self._is_current(real_path, zip_path):
- # the file became current since it was checked above,
- # so proceed.
- return real_path
- # Windows, del old file and retry
- elif os.name == 'nt':
- unlink(real_path)
- rename(tmpnam, real_path)
- return real_path
- raise
-
- except os.error:
- # report a user-friendly error
- manager.extraction_error()
-
- return real_path
-
- def _is_current(self, file_path, zip_path):
- """
- Return True if the file_path is current for this zip_path
- """
- timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
- if not os.path.isfile(file_path):
- return False
- stat = os.stat(file_path)
- if stat.st_size != size or stat.st_mtime != timestamp:
- return False
- # check that the contents match
- zip_contents = self.loader.get_data(zip_path)
- with open(file_path, 'rb') as f:
- file_contents = f.read()
- return zip_contents == file_contents
-
- def _get_eager_resources(self):
- if self.eagers is None:
- eagers = []
- for name in ('native_libs.txt', 'eager_resources.txt'):
- if self.has_metadata(name):
- eagers.extend(self.get_metadata_lines(name))
- self.eagers = eagers
- return self.eagers
-
- def _index(self):
- try:
- return self._dirindex
- except AttributeError:
- ind = {}
- for path in self.zipinfo:
- parts = path.split(os.sep)
- while parts:
- parent = os.sep.join(parts[:-1])
- if parent in ind:
- ind[parent].append(parts[-1])
- break
- else:
- ind[parent] = [parts.pop()]
- self._dirindex = ind
- return ind
-
- def _has(self, fspath):
- zip_path = self._zipinfo_name(fspath)
- return zip_path in self.zipinfo or zip_path in self._index()
-
- def _isdir(self, fspath):
- return self._zipinfo_name(fspath) in self._index()
-
- def _listdir(self, fspath):
- return list(self._index().get(self._zipinfo_name(fspath), ()))
-
- def _eager_to_zip(self, resource_name):
- return self._zipinfo_name(self._fn(self.egg_root, resource_name))
-
- def _resource_to_zip(self, resource_name):
- return self._zipinfo_name(self._fn(self.module_path, resource_name))
-
-
-register_loader_type(zipimport.zipimporter, ZipProvider)
-
-
-class FileMetadata(EmptyProvider):
- """Metadata handler for standalone PKG-INFO files
-
- Usage::
-
- metadata = FileMetadata("/path/to/PKG-INFO")
-
- This provider rejects all data and metadata requests except for PKG-INFO,
- which is treated as existing, and will be the contents of the file at
- the provided location.
- """
-
- def __init__(self, path):
- self.path = path
-
- def _get_metadata_path(self, name):
- return self.path
-
- def has_metadata(self, name):
- return name == 'PKG-INFO' and os.path.isfile(self.path)
-
- def get_metadata(self, name):
- if name != 'PKG-INFO':
- raise KeyError("No metadata except PKG-INFO is available")
-
- with io.open(self.path, encoding='utf-8', errors="replace") as f:
- metadata = f.read()
- self._warn_on_replacement(metadata)
- return metadata
-
- def _warn_on_replacement(self, metadata):
- replacement_char = '�'
- if replacement_char in metadata:
- tmpl = "{self.path} could not be properly decoded in UTF-8"
- msg = tmpl.format(**locals())
- warnings.warn(msg)
-
- def get_metadata_lines(self, name):
- return yield_lines(self.get_metadata(name))
-
-
-class PathMetadata(DefaultProvider):
- """Metadata provider for egg directories
-
- Usage::
-
- # Development eggs:
-
- egg_info = "/path/to/PackageName.egg-info"
- base_dir = os.path.dirname(egg_info)
- metadata = PathMetadata(base_dir, egg_info)
- dist_name = os.path.splitext(os.path.basename(egg_info))[0]
- dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
-
- # Unpacked egg directories:
-
- egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
- metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
- dist = Distribution.from_filename(egg_path, metadata=metadata)
- """
-
- def __init__(self, path, egg_info):
- self.module_path = path
- self.egg_info = egg_info
-
-
-class EggMetadata(ZipProvider):
- """Metadata provider for .egg files"""
-
- def __init__(self, importer):
- """Create a metadata provider from a zipimporter"""
-
- self.zip_pre = importer.archive + os.sep
- self.loader = importer
- if importer.prefix:
- self.module_path = os.path.join(importer.archive, importer.prefix)
- else:
- self.module_path = importer.archive
- self._setup_prefix()
-
-
-_declare_state('dict', _distribution_finders={})
-
-
-def register_finder(importer_type, distribution_finder):
- """Register `distribution_finder` to find distributions in sys.path items
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `distribution_finder` is a callable that, passed a path
- item and the importer instance, yields ``Distribution`` instances found on
- that path item. See ``pkg_resources.find_on_path`` for an example."""
- _distribution_finders[importer_type] = distribution_finder
-
-
-def find_distributions(path_item, only=False):
- """Yield distributions accessible via `path_item`"""
- importer = get_importer(path_item)
- finder = _find_adapter(_distribution_finders, importer)
- return finder(importer, path_item, only)
-
-
-def find_eggs_in_zip(importer, path_item, only=False):
- """
- Find eggs in zip files; possibly multiple nested eggs.
- """
- if importer.archive.endswith('.whl'):
- # wheels are not supported with this finder
- # they don't have PKG-INFO metadata, and won't ever contain eggs
- return
- metadata = EggMetadata(importer)
- if metadata.has_metadata('PKG-INFO'):
- yield Distribution.from_filename(path_item, metadata=metadata)
- if only:
- # don't yield nested distros
- return
- for subitem in metadata.resource_listdir(''):
- if _is_egg_path(subitem):
- subpath = os.path.join(path_item, subitem)
- dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
- for dist in dists:
- yield dist
- elif subitem.lower().endswith(('.dist-info', '.egg-info')):
- subpath = os.path.join(path_item, subitem)
- submeta = EggMetadata(zipimport.zipimporter(subpath))
- submeta.egg_info = subpath
- yield Distribution.from_location(path_item, subitem, submeta)
-
-
-register_finder(zipimport.zipimporter, find_eggs_in_zip)
-
-
-def find_nothing(importer, path_item, only=False):
- return ()
-
-
-register_finder(object, find_nothing)
-
-
-def _by_version_descending(names):
- """
- Given a list of filenames, return them in descending order
- by version number.
-
- >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
- >>> _by_version_descending(names)
- ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'bar', 'foo']
- >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
- >>> _by_version_descending(names)
- ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
- >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
- >>> _by_version_descending(names)
- ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
- """
- def try_parse(name):
- """
- Attempt to parse as a version or return a null version.
- """
- try:
- return packaging.version.Version(name)
- except Exception:
- return packaging.version.Version('0')
-
- def _by_version(name):
- """
- Parse each component of the filename
- """
- name, ext = os.path.splitext(name)
- parts = itertools.chain(name.split('-'), [ext])
- return [try_parse(part) for part in parts]
-
- return sorted(names, key=_by_version, reverse=True)
-
-
-def find_on_path(importer, path_item, only=False):
- """Yield distributions accessible on a sys.path directory"""
- path_item = _normalize_cached(path_item)
-
- if _is_unpacked_egg(path_item):
- yield Distribution.from_filename(
- path_item, metadata=PathMetadata(
- path_item, os.path.join(path_item, 'EGG-INFO')
- )
- )
- return
-
- entries = (
- os.path.join(path_item, child)
- for child in safe_listdir(path_item)
- )
-
- # for performance, before sorting by version,
- # screen entries for only those that will yield
- # distributions
- filtered = (
- entry
- for entry in entries
- if dist_factory(path_item, entry, only)
- )
-
- # scan for .egg and .egg-info in directory
- path_item_entries = _by_version_descending(filtered)
- for entry in path_item_entries:
- fullpath = os.path.join(path_item, entry)
- factory = dist_factory(path_item, entry, only)
- for dist in factory(fullpath):
- yield dist
-
-
-def dist_factory(path_item, entry, only):
- """Return a dist_factory for the given entry."""
- lower = entry.lower()
- is_egg_info = lower.endswith('.egg-info')
- is_dist_info = (
- lower.endswith('.dist-info') and
- os.path.isdir(os.path.join(path_item, entry))
- )
- is_meta = is_egg_info or is_dist_info
- return (
- distributions_from_metadata
- if is_meta else
- find_distributions
- if not only and _is_egg_path(entry) else
- resolve_egg_link
- if not only and lower.endswith('.egg-link') else
- NoDists()
- )
-
-
-class NoDists:
- """
- >>> bool(NoDists())
- False
-
- >>> list(NoDists()('anything'))
- []
- """
- def __bool__(self):
- return False
-
- def __call__(self, fullpath):
- return iter(())
-
-
-def safe_listdir(path):
- """
- Attempt to list contents of path, but suppress some exceptions.
- """
- try:
- return os.listdir(path)
- except (PermissionError, NotADirectoryError):
- pass
- except OSError as e:
- # Ignore the directory if it does not exist, is not a directory, or
- # permission is denied
- if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
- raise
- return ()
-
-
-def distributions_from_metadata(path):
- root = os.path.dirname(path)
- if os.path.isdir(path):
- if len(os.listdir(path)) == 0:
- # empty metadata dir; skip
- return
- metadata = PathMetadata(root, path)
- else:
- metadata = FileMetadata(path)
- entry = os.path.basename(path)
- yield Distribution.from_location(
- root, entry, metadata, precedence=DEVELOP_DIST,
- )
-
-
-def non_empty_lines(path):
- """
- Yield non-empty lines from file at path
- """
- with open(path) as f:
- for line in f:
- line = line.strip()
- if line:
- yield line
-
-
-def resolve_egg_link(path):
- """
- Given a path to an .egg-link, resolve distributions
- present in the referenced path.
- """
- referenced_paths = non_empty_lines(path)
- resolved_paths = (
- os.path.join(os.path.dirname(path), ref)
- for ref in referenced_paths
- )
- dist_groups = map(find_distributions, resolved_paths)
- return next(dist_groups, ())
-
-
-register_finder(pkgutil.ImpImporter, find_on_path)
-
-if hasattr(importlib_machinery, 'FileFinder'):
- register_finder(importlib_machinery.FileFinder, find_on_path)
-
-_declare_state('dict', _namespace_handlers={})
-_declare_state('dict', _namespace_packages={})
-
-
-def register_namespace_handler(importer_type, namespace_handler):
- """Register `namespace_handler` to declare namespace packages
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `namespace_handler` is a callable like this::
-
- def namespace_handler(importer, path_entry, moduleName, module):
- # return a path_entry to use for child packages
-
- Namespace handlers are only called if the importer object has already
- agreed that it can handle the relevant path item, and they should only
- return a subpath if the module __path__ does not already contain an
- equivalent subpath. For an example namespace handler, see
- ``pkg_resources.file_ns_handler``.
- """
- _namespace_handlers[importer_type] = namespace_handler
-
-
-def _handle_ns(packageName, path_item):
- """Ensure that named package includes a subpath of path_item (if needed)"""
-
- importer = get_importer(path_item)
- if importer is None:
- return None
-
- # use find_spec (PEP 451) and fall-back to find_module (PEP 302)
- try:
- spec = importer.find_spec(packageName)
- except AttributeError:
- # capture warnings due to #1111
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- loader = importer.find_module(packageName)
- else:
- loader = spec.loader if spec else None
-
- if loader is None:
- return None
- module = sys.modules.get(packageName)
- if module is None:
- module = sys.modules[packageName] = types.ModuleType(packageName)
- module.__path__ = []
- _set_parent_ns(packageName)
- elif not hasattr(module, '__path__'):
- raise TypeError("Not a package:", packageName)
- handler = _find_adapter(_namespace_handlers, importer)
- subpath = handler(importer, path_item, packageName, module)
- if subpath is not None:
- path = module.__path__
- path.append(subpath)
- importlib.import_module(packageName)
- _rebuild_mod_path(path, packageName, module)
- return subpath
-
-
-def _rebuild_mod_path(orig_path, package_name, module):
- """
- Rebuild module.__path__ ensuring that all entries are ordered
- corresponding to their sys.path order
- """
- sys_path = [_normalize_cached(p) for p in sys.path]
-
- def safe_sys_path_index(entry):
- """
- Workaround for #520 and #513.
- """
- try:
- return sys_path.index(entry)
- except ValueError:
- return float('inf')
-
- def position_in_sys_path(path):
- """
- Return the ordinal of the path based on its position in sys.path
- """
- path_parts = path.split(os.sep)
- module_parts = package_name.count('.') + 1
- parts = path_parts[:-module_parts]
- return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
-
- new_path = sorted(orig_path, key=position_in_sys_path)
- new_path = [_normalize_cached(p) for p in new_path]
-
- if isinstance(module.__path__, list):
- module.__path__[:] = new_path
- else:
- module.__path__ = new_path
-
-
-def declare_namespace(packageName):
- """Declare that package 'packageName' is a namespace package"""
-
- _imp.acquire_lock()
- try:
- if packageName in _namespace_packages:
- return
-
- path = sys.path
- parent, _, _ = packageName.rpartition('.')
-
- if parent:
- declare_namespace(parent)
- if parent not in _namespace_packages:
- __import__(parent)
- try:
- path = sys.modules[parent].__path__
- except AttributeError as e:
- raise TypeError("Not a package:", parent) from e
-
- # Track what packages are namespaces, so when new path items are added,
- # they can be updated
- _namespace_packages.setdefault(parent or None, []).append(packageName)
- _namespace_packages.setdefault(packageName, [])
-
- for path_item in path:
- # Ensure all the parent's path items are reflected in the child,
- # if they apply
- _handle_ns(packageName, path_item)
-
- finally:
- _imp.release_lock()
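-
- # Sketch: a pkg_resources-style namespace package's __init__.py contains
- # exactly this call (the package itself supplies __name__):
- #
- #     __import__('pkg_resources').declare_namespace(__name__)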
-
-
-def fixup_namespace_packages(path_item, parent=None):
- """Ensure that previously-declared namespace packages include path_item"""
- _imp.acquire_lock()
- try:
- for package in _namespace_packages.get(parent, ()):
- subpath = _handle_ns(package, path_item)
- if subpath:
- fixup_namespace_packages(subpath, package)
- finally:
- _imp.release_lock()
-
-
-def file_ns_handler(importer, path_item, packageName, module):
- """Compute an ns-package subpath for a filesystem or zipfile importer"""
-
- subpath = os.path.join(path_item, packageName.split('.')[-1])
- normalized = _normalize_cached(subpath)
- for item in module.__path__:
- if _normalize_cached(item) == normalized:
- break
- else:
- # Only return the path if it's not already there
- return subpath
-
-
-register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
-register_namespace_handler(zipimport.zipimporter, file_ns_handler)
-
-if hasattr(importlib_machinery, 'FileFinder'):
- register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
-
-
-def null_ns_handler(importer, path_item, packageName, module):
- return None
-
-
-register_namespace_handler(object, null_ns_handler)
-
-
-def normalize_path(filename):
- """Normalize a file/dir name for comparison purposes"""
- return os.path.normcase(os.path.realpath(os.path.normpath(
- _cygwin_patch(filename))))
-
-
-def _cygwin_patch(filename): # pragma: nocover
- """
- Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
- symlink components. Using
- os.path.abspath() works around this limitation. A fix in os.getcwd()
- would probably be better, in Cygwin even more so, except
- that this seems to be by design...
- """
- return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
-
-
-def _normalize_cached(filename, _cache={}):
- try:
- return _cache[filename]
- except KeyError:
- _cache[filename] = result = normalize_path(filename)
- return result
-
-
-def _is_egg_path(path):
- """
- Determine if given path appears to be an egg.
- """
- return _is_zip_egg(path) or _is_unpacked_egg(path)
-
-
-def _is_zip_egg(path):
- return (
- path.lower().endswith('.egg') and
- os.path.isfile(path) and
- zipfile.is_zipfile(path)
- )
-
-
-def _is_unpacked_egg(path):
- """
- Determine if given path appears to be an unpacked egg.
- """
- return (
- path.lower().endswith('.egg') and
- os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
- )
-
-
-def _set_parent_ns(packageName):
- parts = packageName.split('.')
- name = parts.pop()
- if parts:
- parent = '.'.join(parts)
- setattr(sys.modules[parent], name, sys.modules[packageName])
-
-
-MODULE = re.compile(r"\w+(\.\w+)*$").match
-EGG_NAME = re.compile(
- r"""
- (?P<name>[^-]+) (
- -(?P<ver>[^-]+) (
- -py(?P<pyver>[^-]+) (
- -(?P<plat>.+)
- )?
- )?
- )?
- """,
- re.VERBOSE | re.IGNORECASE,
-).match
-
-
-class EntryPoint:
- """Object representing an advertised importable object"""
-
- def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
- if not MODULE(module_name):
- raise ValueError("Invalid module name", module_name)
- self.name = name
- self.module_name = module_name
- self.attrs = tuple(attrs)
- self.extras = tuple(extras)
- self.dist = dist
-
- def __str__(self):
- s = "%s = %s" % (self.name, self.module_name)
- if self.attrs:
- s += ':' + '.'.join(self.attrs)
- if self.extras:
- s += ' [%s]' % ','.join(self.extras)
- return s
-
- def __repr__(self):
- return "EntryPoint.parse(%r)" % str(self)
-
- def load(self, require=True, *args, **kwargs):
- """
- Require packages for this EntryPoint, then resolve it.
- """
- if not require or args or kwargs:
- warnings.warn(
- "Parameters to load are deprecated. Call .resolve and "
- ".require separately.",
- PkgResourcesDeprecationWarning,
- stacklevel=2,
- )
- if require:
- self.require(*args, **kwargs)
- return self.resolve()
-
- def resolve(self):
- """
- Resolve the entry point from its module and attrs.
- """
- module = __import__(self.module_name, fromlist=['__name__'], level=0)
- try:
- return functools.reduce(getattr, self.attrs, module)
- except AttributeError as exc:
- raise ImportError(str(exc)) from exc
-
- def require(self, env=None, installer=None):
- if self.extras and not self.dist:
- raise UnknownExtra("Can't require() without a distribution", self)
-
- # Get the requirements for this entry point with all its extras and
- # then resolve them. We have to pass `extras` along when resolving so
- # that the working set knows what extras we want. Otherwise, for
- # dist-info distributions, the working set will assume that the
- # requirements for that extra are purely optional and skip over them.
- reqs = self.dist.requires(self.extras)
- items = working_set.resolve(reqs, env, installer, extras=self.extras)
- list(map(working_set.add, items))
-
- pattern = re.compile(
- r'\s*'
- r'(?P<name>.+?)\s*'
- r'=\s*'
- r'(?P<module>[\w.]+)\s*'
- r'(:\s*(?P<attr>[\w.]+))?\s*'
- r'(?P<extras>\[.*\])?\s*$'
- )
-
- @classmethod
- def parse(cls, src, dist=None):
- """Parse a single entry point from string `src`
-
- Entry point syntax follows the form::
-
- name = some.module:some.attr [extra1, extra2]
-
- The entry name and module name are required, but the ``:attrs`` and
- ``[extras]`` parts are optional
- """
- m = cls.pattern.match(src)
- if not m:
- msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
- raise ValueError(msg, src)
- res = m.groupdict()
- extras = cls._parse_extras(res['extras'])
- attrs = res['attr'].split('.') if res['attr'] else ()
- return cls(res['name'], res['module'], attrs, extras, dist)
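-
- # Sketch: parsing the syntax documented above (names hypothetical):
- #
- #     ep = EntryPoint.parse('main = mypkg.cli:run [extra1]')
- #     ep.module_name   # -> 'mypkg.cli'
- #     ep.attrs         # -> ('run',)
- #     ep.extras        # -> ('extra1',)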
-
- @classmethod
- def _parse_extras(cls, extras_spec):
- if not extras_spec:
- return ()
- req = Requirement.parse('x' + extras_spec)
- if req.specs:
- raise ValueError()
- return req.extras
-
- @classmethod
- def parse_group(cls, group, lines, dist=None):
- """Parse an entry point group"""
- if not MODULE(group):
- raise ValueError("Invalid group name", group)
- this = {}
- for line in yield_lines(lines):
- ep = cls.parse(line, dist)
- if ep.name in this:
- raise ValueError("Duplicate entry point", group, ep.name)
- this[ep.name] = ep
- return this
-
- @classmethod
- def parse_map(cls, data, dist=None):
- """Parse a map of entry point groups"""
- if isinstance(data, dict):
- data = data.items()
- else:
- data = split_sections(data)
- maps = {}
- for group, lines in data:
- if group is None:
- if not lines:
- continue
- raise ValueError("Entry points must be listed in groups")
- group = group.strip()
- if group in maps:
- raise ValueError("Duplicate group name", group)
- maps[group] = cls.parse_group(group, lines, dist)
- return maps
-
-
-def _version_from_file(lines):
- """
- Given an iterable of lines from a Metadata file, return
- the value of the Version field, if present, or None otherwise.
- """
- def is_version_line(line):
- return line.lower().startswith('version:')
- version_lines = filter(is_version_line, lines)
- line = next(iter(version_lines), '')
- _, _, value = line.partition(':')
- return safe_version(value.strip()) or None
-
-
-class Distribution:
- """Wrap an actual or potential sys.path entry w/metadata"""
- PKG_INFO = 'PKG-INFO'
-
- def __init__(
- self, location=None, metadata=None, project_name=None,
- version=None, py_version=PY_MAJOR, platform=None,
- precedence=EGG_DIST):
- self.project_name = safe_name(project_name or 'Unknown')
- if version is not None:
- self._version = safe_version(version)
- self.py_version = py_version
- self.platform = platform
- self.location = location
- self.precedence = precedence
- self._provider = metadata or empty_provider
-
- @classmethod
- def from_location(cls, location, basename, metadata=None, **kw):
- project_name, version, py_version, platform = [None] * 4
- basename, ext = os.path.splitext(basename)
- if ext.lower() in _distributionImpl:
- cls = _distributionImpl[ext.lower()]
-
- match = EGG_NAME(basename)
- if match:
- project_name, version, py_version, platform = match.group(
- 'name', 'ver', 'pyver', 'plat'
- )
- return cls(
- location, metadata, project_name=project_name, version=version,
- py_version=py_version, platform=platform, **kw
- )._reload_version()
-
- def _reload_version(self):
- return self
-
- @property
- def hashcmp(self):
- return (
- self.parsed_version,
- self.precedence,
- self.key,
- self.location,
- self.py_version or '',
- self.platform or '',
- )
-
- def __hash__(self):
- return hash(self.hashcmp)
-
- def __lt__(self, other):
- return self.hashcmp < other.hashcmp
-
- def __le__(self, other):
- return self.hashcmp <= other.hashcmp
-
- def __gt__(self, other):
- return self.hashcmp > other.hashcmp
-
- def __ge__(self, other):
- return self.hashcmp >= other.hashcmp
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- # It's not a Distribution, so they are not equal
- return False
- return self.hashcmp == other.hashcmp
-
- def __ne__(self, other):
- return not self == other
-
- # These properties have to be lazy so that we don't have to load any
- # metadata until/unless it's actually needed. (i.e., some distributions
- # may not know their name or version without loading PKG-INFO)
-
- @property
- def key(self):
- try:
- return self._key
- except AttributeError:
- self._key = key = self.project_name.lower()
- return key
-
- @property
- def parsed_version(self):
- if not hasattr(self, "_parsed_version"):
- self._parsed_version = parse_version(self.version)
-
- return self._parsed_version
-
- def _warn_legacy_version(self):
- LV = packaging.version.LegacyVersion
- is_legacy = isinstance(self._parsed_version, LV)
- if not is_legacy:
- return
-
- # While an empty version is technically a legacy version and
- # is not a valid PEP 440 version, it's also unlikely to
- # actually come from someone and instead it is more likely that
- # it comes from setuptools attempting to parse a filename and
- # including it in the list. So for that we'll gate this warning
- # on if the version is anything at all or not.
- if not self.version:
- return
-
- tmpl = textwrap.dedent("""
- '{project_name} ({version})' is being parsed as a legacy,
- non PEP 440,
- version. You may find odd behavior and sort order.
- In particular it will be sorted as less than 0.0. It
- is recommended to migrate to PEP 440 compatible
- versions.
- """).strip().replace('\n', ' ')
-
- warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
-
- @property
- def version(self):
- try:
- return self._version
- except AttributeError as e:
- version = self._get_version()
- if version is None:
- path = self._get_metadata_path_for_display(self.PKG_INFO)
- msg = (
- "Missing 'Version:' header and/or {} file at path: {}"
- ).format(self.PKG_INFO, path)
- raise ValueError(msg, self) from e
-
- return version
-
- @property
- def _dep_map(self):
- """
- A map of extra to its list of (direct) requirements
- for this distribution, including the null extra.
- """
- try:
- return self.__dep_map
- except AttributeError:
- self.__dep_map = self._filter_extras(self._build_dep_map())
- return self.__dep_map
-
- @staticmethod
- def _filter_extras(dm):
- """
- Given a mapping of extras to dependencies, strip off
- environment markers and filter out any dependencies
- not matching the markers.
- """
- for extra in list(filter(None, dm)):
- new_extra = extra
- reqs = dm.pop(extra)
- new_extra, _, marker = extra.partition(':')
- fails_marker = marker and (
- invalid_marker(marker)
- or not evaluate_marker(marker)
- )
- if fails_marker:
- reqs = []
- new_extra = safe_extra(new_extra) or None
-
- dm.setdefault(new_extra, []).extend(reqs)
- return dm
-
- def _build_dep_map(self):
- dm = {}
- for name in 'requires.txt', 'depends.txt':
- for extra, reqs in split_sections(self._get_metadata(name)):
- dm.setdefault(extra, []).extend(parse_requirements(reqs))
- return dm
-
- def requires(self, extras=()):
- """List of Requirements needed for this distro if `extras` are used"""
- dm = self._dep_map
- deps = []
- deps.extend(dm.get(None, ()))
- for ext in extras:
- try:
- deps.extend(dm[safe_extra(ext)])
- except KeyError as e:
- raise UnknownExtra(
- "%s has no such extra feature %r" % (self, ext)
- ) from e
- return deps
-
- def _get_metadata_path_for_display(self, name):
- """
- Return the path to the given metadata file, if available.
- """
- try:
- # We need to access _get_metadata_path() on the provider object
- # directly rather than through this class's __getattr__()
- # since _get_metadata_path() is marked private.
- path = self._provider._get_metadata_path(name)
-
- # Handle exceptions e.g. in case the distribution's metadata
- # provider doesn't support _get_metadata_path().
- except Exception:
- return '[could not detect]'
-
- return path
-
- def _get_metadata(self, name):
- if self.has_metadata(name):
- for line in self.get_metadata_lines(name):
- yield line
-
- def _get_version(self):
- lines = self._get_metadata(self.PKG_INFO)
- version = _version_from_file(lines)
-
- return version
-
- def activate(self, path=None, replace=False):
- """Ensure distribution is importable on `path` (default=sys.path)"""
- if path is None:
- path = sys.path
- self.insert_on(path, replace=replace)
- if path is sys.path:
- fixup_namespace_packages(self.location)
- for pkg in self._get_metadata('namespace_packages.txt'):
- if pkg in sys.modules:
- declare_namespace(pkg)
-
- def egg_name(self):
- """Return what this distribution's standard .egg filename should be"""
- filename = "%s-%s-py%s" % (
- to_filename(self.project_name), to_filename(self.version),
- self.py_version or PY_MAJOR
- )
-
- if self.platform:
- filename += '-' + self.platform
- return filename
-
- def __repr__(self):
- if self.location:
- return "%s (%s)" % (self, self.location)
- else:
- return str(self)
-
- def __str__(self):
- try:
- version = getattr(self, 'version', None)
- except ValueError:
- version = None
- version = version or "[unknown version]"
- return "%s %s" % (self.project_name, version)
-
- def __getattr__(self, attr):
- """Delegate all unrecognized public attributes to .metadata provider"""
- if attr.startswith('_'):
- raise AttributeError(attr)
- return getattr(self._provider, attr)
-
- def __dir__(self):
- return list(
- set(super(Distribution, self).__dir__())
- | set(
- attr for attr in self._provider.__dir__()
- if not attr.startswith('_')
- )
- )
-
- @classmethod
- def from_filename(cls, filename, metadata=None, **kw):
- return cls.from_location(
- _normalize_cached(filename), os.path.basename(filename), metadata,
- **kw
- )
-
- def as_requirement(self):
- """Return a ``Requirement`` that matches this distribution exactly"""
- if isinstance(self.parsed_version, packaging.version.Version):
- spec = "%s==%s" % (self.project_name, self.parsed_version)
- else:
- spec = "%s===%s" % (self.project_name, self.parsed_version)
-
- return Requirement.parse(spec)
-
- def load_entry_point(self, group, name):
- """Return the `name` entry point of `group` or raise ImportError"""
- ep = self.get_entry_info(group, name)
- if ep is None:
- raise ImportError("Entry point %r not found" % ((group, name),))
- return ep.load()
-
- def get_entry_map(self, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- try:
- ep_map = self._ep_map
- except AttributeError:
- ep_map = self._ep_map = EntryPoint.parse_map(
- self._get_metadata('entry_points.txt'), self
- )
- if group is not None:
- return ep_map.get(group, {})
- return ep_map
-
- def get_entry_info(self, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return self.get_entry_map(group).get(name)
-
- # FIXME: 'Distribution.insert_on' is too complex (13)
- def insert_on(self, path, loc=None, replace=False): # noqa: C901
- """Ensure self.location is on path
-
- If replace=False (default):
- - If location is already in path anywhere, do nothing.
- - Else:
- - If it's an egg and its parent directory is on path,
- insert just ahead of the parent.
- - Else: add to the end of path.
- If replace=True:
- - If location is already on path anywhere (not eggs)
- or higher priority than its parent (eggs)
- do nothing.
- - Else:
- - If it's an egg and its parent directory is on path,
- insert just ahead of the parent,
- removing any lower-priority entries.
- - Else: add it to the front of path.
- """
-
- loc = loc or self.location
- if not loc:
- return
-
- nloc = _normalize_cached(loc)
- bdir = os.path.dirname(nloc)
- npath = [(p and _normalize_cached(p) or p) for p in path]
-
- for p, item in enumerate(npath):
- if item == nloc:
- if replace:
- break
- else:
- # don't modify path (even removing duplicates) if
- # found and not replace
- return
- elif item == bdir and self.precedence == EGG_DIST:
- # if it's an .egg, give it precedence over its directory
- # UNLESS it's already been added to sys.path and replace=False
- if (not replace) and nloc in npath[p:]:
- return
- if path is sys.path:
- self.check_version_conflict()
- path.insert(p, loc)
- npath.insert(p, nloc)
- break
- else:
- if path is sys.path:
- self.check_version_conflict()
- if replace:
- path.insert(0, loc)
- else:
- path.append(loc)
- return
-
- # p is the spot where we found or inserted loc; now remove duplicates
- while True:
- try:
- np = npath.index(nloc, p + 1)
- except ValueError:
- break
- else:
- del npath[np], path[np]
- # ha!
- p = np
-
- return
-
- def check_version_conflict(self):
- if self.key == 'setuptools':
- # ignore the inevitable setuptools self-conflicts :(
- return
-
- nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
- loc = normalize_path(self.location)
- for modname in self._get_metadata('top_level.txt'):
- if (modname not in sys.modules or modname in nsp
- or modname in _namespace_packages):
- continue
- if modname in ('pkg_resources', 'setuptools', 'site'):
- continue
- fn = getattr(sys.modules[modname], '__file__', None)
- if fn and (normalize_path(fn).startswith(loc) or
- fn.startswith(self.location)):
- continue
- issue_warning(
- "Module %s was already imported from %s, but %s is being added"
- " to sys.path" % (modname, fn, self.location),
- )
-
- def has_version(self):
- try:
- self.version
- except ValueError:
- issue_warning("Unbuilt egg for " + repr(self))
- return False
- return True
-
- def clone(self, **kw):
- """Copy this distribution, substituting in any changed keyword args"""
- names = 'project_name version py_version platform location precedence'
- for attr in names.split():
- kw.setdefault(attr, getattr(self, attr, None))
- kw.setdefault('metadata', self._provider)
- return self.__class__(**kw)
-
- @property
- def extras(self):
- return [dep for dep in self._dep_map if dep]
-
-
-class EggInfoDistribution(Distribution):
- def _reload_version(self):
- """
- Packages installed by distutils (e.g. numpy or scipy) use an old
- safe_version, so their version numbers can get mangled when
- converted to filenames (e.g. 1.11.0.dev0+2329eae to
- 1.11.0.dev0_2329eae). These distributions will not be parsed
- properly downstream by Distribution and safe_version, so take an
- extra step and try to get the version number from the metadata
- file itself instead of the filename.
- """
- md_version = self._get_version()
- if md_version:
- self._version = md_version
- return self
-
-
-class DistInfoDistribution(Distribution):
- """
- Wrap an actual or potential sys.path entry
- w/metadata, .dist-info style.
- """
- PKG_INFO = 'METADATA'
- EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
-
- @property
- def _parsed_pkg_info(self):
- """Parse and cache metadata"""
- try:
- return self._pkg_info
- except AttributeError:
- metadata = self.get_metadata(self.PKG_INFO)
- self._pkg_info = email.parser.Parser().parsestr(metadata)
- return self._pkg_info
-
- @property
- def _dep_map(self):
- try:
- return self.__dep_map
- except AttributeError:
- self.__dep_map = self._compute_dependencies()
- return self.__dep_map
-
- def _compute_dependencies(self):
- """Recompute this distribution's dependencies."""
- dm = self.__dep_map = {None: []}
-
- reqs = []
- # Including any condition expressions
- for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
- reqs.extend(parse_requirements(req))
-
- def reqs_for_extra(extra):
- for req in reqs:
- if not req.marker or req.marker.evaluate({'extra': extra}):
- yield req
-
- common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None)))
- dm[None].extend(common)
-
- for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
- s_extra = safe_extra(extra.strip())
- dm[s_extra] = [r for r in reqs_for_extra(extra) if r not in common]
-
- return dm
-
-
-_distributionImpl = {
- '.egg': Distribution,
- '.egg-info': EggInfoDistribution,
- '.dist-info': DistInfoDistribution,
-}
-
-
-def issue_warning(*args, **kw):
- level = 1
- g = globals()
- try:
- # find the first stack frame that is *not* code in
- # the pkg_resources module, to use for the warning
- while sys._getframe(level).f_globals is g:
- level += 1
- except ValueError:
- pass
- warnings.warn(stacklevel=level + 1, *args, **kw)
-
-
-def parse_requirements(strs):
- """
- Yield ``Requirement`` objects for each specification in `strs`.
-
- `strs` must be a string, or a (possibly-nested) iterable thereof.
- """
- return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs))))
-
-
-class RequirementParseError(packaging.requirements.InvalidRequirement):
- "Compatibility wrapper for InvalidRequirement"
-
-
-class Requirement(packaging.requirements.Requirement):
- def __init__(self, requirement_string):
- """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
- super(Requirement, self).__init__(requirement_string)
- self.unsafe_name = self.name
- project_name = safe_name(self.name)
- self.project_name, self.key = project_name, project_name.lower()
- self.specs = [
- (spec.operator, spec.version) for spec in self.specifier]
- self.extras = tuple(map(safe_extra, self.extras))
- self.hashCmp = (
- self.key,
- self.url,
- self.specifier,
- frozenset(self.extras),
- str(self.marker) if self.marker else None,
- )
- self.__hash = hash(self.hashCmp)
-
- def __eq__(self, other):
- return (
- isinstance(other, Requirement) and
- self.hashCmp == other.hashCmp
- )
-
- def __ne__(self, other):
- return not self == other
-
- def __contains__(self, item):
- if isinstance(item, Distribution):
- if item.key != self.key:
- return False
-
- item = item.version
-
- # Allow prereleases always in order to match the previous behavior of
- # this method. In the future this should be smarter and follow PEP 440
- # more accurately.
- return self.specifier.contains(item, prereleases=True)
-
- def __hash__(self):
- return self.__hash
-
- def __repr__(self):
- return "Requirement.parse(%r)" % str(self)
-
- @staticmethod
- def parse(s):
- req, = parse_requirements(s)
- return req
-
-
-def _always_object(classes):
- """
- Ensure object appears in the mro even
- for old-style classes.
- """
- if object not in classes:
- return classes + (object,)
- return classes
-
-
-def _find_adapter(registry, ob):
- """Return an adapter factory for `ob` from `registry`"""
- types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
- for t in types:
- if t in registry:
- return registry[t]
-
-
-def ensure_directory(path):
- """Ensure that the parent directory of `path` exists"""
- dirname = os.path.dirname(path)
- os.makedirs(dirname, exist_ok=True)
-
-
-def _bypass_ensure_directory(path):
- """Sandbox-bypassing version of ensure_directory()"""
- if not WRITE_SUPPORT:
- raise IOError('"os.mkdir" not supported on this platform.')
- dirname, filename = split(path)
- if dirname and filename and not isdir(dirname):
- _bypass_ensure_directory(dirname)
- try:
- mkdir(dirname, 0o755)
- except FileExistsError:
- pass
-
-
-def split_sections(s):
- """Split a string or iterable thereof into (section, content) pairs
-
- Each ``section`` is a stripped version of the section header ("[section]")
- and each ``content`` is a list of stripped lines excluding blank lines and
- comment-only lines. If there are any such lines before the first section
- header, they're returned in a first ``section`` of ``None``.
- """
- section = None
- content = []
- for line in yield_lines(s):
- if line.startswith("["):
- if line.endswith("]"):
- if section or content:
- yield section, content
- section = line[1:-1].strip()
- content = []
- else:
- raise ValueError("Invalid section heading", line)
- else:
- content.append(line)
-
- # wrap up last segment
- yield section, content
-
-
-def _mkstemp(*args, **kw):
- old_open = os.open
- try:
- # temporarily bypass sandboxing
- os.open = os_open
- return tempfile.mkstemp(*args, **kw)
- finally:
- # and then put it back
- os.open = old_open
-
-
-# Silence the PEP440Warning by default, so that end users don't get hit by it
-# randomly just because they use pkg_resources. We want to append the rule
-# because we want earlier uses of filterwarnings to take precedence over this
-# one.
-warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
-
-
-# from jaraco.functools 1.3
-def _call_aside(f, *args, **kwargs):
- f(*args, **kwargs)
- return f
-
-
-@_call_aside
-def _initialize(g=globals()):
- "Set up global resource manager (deliberately not state-saved)"
- manager = ResourceManager()
- g['_manager'] = manager
- g.update(
- (name, getattr(manager, name))
- for name in dir(manager)
- if not name.startswith('_')
- )
-
-
-class PkgResourcesDeprecationWarning(Warning):
- """
- Base class for warning about deprecations in ``pkg_resources``
-
- This class is not derived from ``DeprecationWarning``, and as such is
- visible by default.
- """
-
-
-@_call_aside
-def _initialize_master_working_set():
- """
- Prepare the master working set and make the ``require()``
- API available.
-
- This function has explicit effects on the global state
- of pkg_resources. It is intended to be invoked once at
- the initialization of this module.
-
- Invocation by other packages is unsupported and done
- at their own risk.
- """
- working_set = WorkingSet._build_master()
- _declare_state('object', working_set=working_set)
-
- require = working_set.require
- iter_entry_points = working_set.iter_entry_points
- add_activation_listener = working_set.subscribe
- run_script = working_set.run_script
- # backward compatibility
- run_main = run_script
- # Activate all distributions already on sys.path with replace=False and
- # ensure that all distributions added to the working set in the future
- # (e.g. by calling ``require()``) will get activated as well,
- # with higher priority (replace=True).
- tuple(
- dist.activate(replace=False)
- for dist in working_set
- )
- add_activation_listener(
- lambda dist: dist.activate(replace=True),
- existing=False,
- )
- working_set.entries = []
- # match order
- list(map(working_set.add_entry, sys.path))
- globals().update(locals())
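For context on the pkg_resources API being removed above, here is a minimal usage sketch of `parse_requirements()`, `Requirement.parse()`, and `split_sections()`, assuming setuptools' `pkg_resources` is importable; the package and version strings are illustrative:

```python
import pkg_resources

# parse_requirements() joins continuation lines, drops comments, and
# yields a Requirement object per PEP 508 specification.
reqs = list(pkg_resources.parse_requirements("requests>=2.0\npackaging"))

req = pkg_resources.Requirement.parse("requests>=2.0")
print(req.key, req.specs)   # requests [('>=', '2.0')]

# __contains__ accepts a Distribution or a bare version string and
# always allows prereleases, matching the comment in the source above.
print("2.28.1" in req)      # True

# split_sections() pairs INI-style "[section]" headers with their lines.
print(list(pkg_resources.split_sections(["a", "[extra]", "b"])))
# [(None, ['a']), ('extra', ['b'])]
```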
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/appdirs.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/appdirs.py
deleted file mode 100644
index ae67001a..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/appdirs.py
+++ /dev/null
@@ -1,608 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2005-2010 ActiveState Software Inc.
-# Copyright (c) 2013 Eddy Petrișor
-
-"""Utilities for determining application-specific dirs.
-
-See <http://github.com/ActiveState/appdirs> for details and usage.
-"""
-# Dev Notes:
-# - MSDN on where to store app data files:
-# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
-# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
-# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
-
-__version_info__ = (1, 4, 3)
-__version__ = '.'.join(map(str, __version_info__))
-
-
-import sys
-import os
-
-PY3 = sys.version_info[0] == 3
-
-if PY3:
- unicode = str
-
-if sys.platform.startswith('java'):
- import platform
- os_name = platform.java_ver()[3][0]
- if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
- system = 'win32'
- elif os_name.startswith('Mac'): # "Mac OS X", etc.
- system = 'darwin'
- else: # "Linux", "SunOS", "FreeBSD", etc.
- # Setting this to "linux2" is not ideal, but only Windows or Mac
- # are actually checked for and the rest of the module expects
- # *sys.platform* style strings.
- system = 'linux2'
-else:
- system = sys.platform
-
-
-
-def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
- r"""Return full path to the user-specific data dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "roaming" (boolean, default False) can be set True to use the Windows
- roaming appdata directory. That means that for users on a Windows
- network setup for roaming profiles, this user data will be
- sync'd on login. See
- <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
- for a discussion of issues.
-
- Typical user data directories are:
- Mac OS X: ~/Library/Application Support/<AppName>
- Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
- Win XP (not roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
- Win XP (roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
- Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
- Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
-
- For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
- That means, by default "~/.local/share/<AppName>".
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
- path = os.path.normpath(_get_win_folder(const))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- elif system == 'darwin':
- path = os.path.expanduser('~/Library/Application Support/')
- if appname:
- path = os.path.join(path, appname)
- else:
- path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
- r"""Return full path to the user-shared data dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "multipath" is an optional parameter only applicable to *nix
- which indicates that the entire list of data dirs should be
- returned. By default, the first item from XDG_DATA_DIRS is
- returned, or '/usr/local/share/<AppName>',
- if XDG_DATA_DIRS is not set
-
- Typical site data directories are:
- Mac OS X: /Library/Application Support/<AppName>
- Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
- Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
- Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
- Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
-
- For Unix, this is using the $XDG_DATA_DIRS[0] default.
-
- WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- elif system == 'darwin':
- path = os.path.expanduser('/Library/Application Support')
- if appname:
- path = os.path.join(path, appname)
- else:
- # XDG default for $XDG_DATA_DIRS
- # only first, if multipath is False
- path = os.getenv('XDG_DATA_DIRS',
- os.pathsep.join(['/usr/local/share', '/usr/share']))
- pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
- if appname:
- if version:
- appname = os.path.join(appname, version)
- pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
- if multipath:
- path = os.pathsep.join(pathlist)
- else:
- path = pathlist[0]
- return path
-
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
- r"""Return full path to the user-specific config dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "roaming" (boolean, default False) can be set True to use the Windows
- roaming appdata directory. That means that for users on a Windows
- network setup for roaming profiles, this user data will be
- sync'd on login. See
- <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
- for a discussion of issues.
-
- Typical user config directories are:
- Mac OS X: same as user_data_dir
- Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
- Win *: same as user_data_dir
-
- For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
- That means, by default "~/.config/<AppName>".
- """
- if system in ["win32", "darwin"]:
- path = user_data_dir(appname, appauthor, None, roaming)
- else:
- path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
- r"""Return full path to the user-shared data dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "multipath" is an optional parameter only applicable to *nix
- which indicates that the entire list of config dirs should be
- returned. By default, the first item from XDG_CONFIG_DIRS is
- returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
-
- Typical site config directories are:
- Mac OS X: same as site_data_dir
- Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
- $XDG_CONFIG_DIRS
- Win *: same as site_data_dir
- Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-
- For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
-
- WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
- """
- if system in ["win32", "darwin"]:
- path = site_data_dir(appname, appauthor)
- if appname and version:
- path = os.path.join(path, version)
- else:
- # XDG default for $XDG_CONFIG_DIRS
- # only first, if multipath is False
- path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
- pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
- if appname:
- if version:
- appname = os.path.join(appname, version)
- pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
- if multipath:
- path = os.pathsep.join(pathlist)
- else:
- path = pathlist[0]
- return path
-
-
-def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
- r"""Return full path to the user-specific cache dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "opinion" (boolean) can be False to disable the appending of
- "Cache" to the base app data dir for Windows. See
- discussion below.
-
- Typical user cache directories are:
- Mac OS X: ~/Library/Caches/<AppName>
- Unix: ~/.cache/<AppName> (XDG default)
- Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
- Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
-
- On Windows the only suggestion in the MSDN docs is that local settings go in
- the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
- app data dir (the default returned by `user_data_dir` above). Apps typically
- put cache data somewhere *under* the given dir here. Some examples:
- ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
- ...\Acme\SuperApp\Cache\1.0
- OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
- This can be disabled with the `opinion=False` option.
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- if opinion:
- path = os.path.join(path, "Cache")
- elif system == 'darwin':
- path = os.path.expanduser('~/Library/Caches')
- if appname:
- path = os.path.join(path, appname)
- else:
- path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
- r"""Return full path to the user-specific state dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "roaming" (boolean, default False) can be set True to use the Windows
- roaming appdata directory. That means that for users on a Windows
- network setup for roaming profiles, this user data will be
- sync'd on login. See
- <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
- for a discussion of issues.
-
- Typical user state directories are:
- Mac OS X: same as user_data_dir
- Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
- Win *: same as user_data_dir
-
- For Unix, we follow this Debian proposal
- <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
- to extend the XDG spec and support $XDG_STATE_HOME.
-
- That means, by default "~/.local/state/<AppName>".
- """
- if system in ["win32", "darwin"]:
- path = user_data_dir(appname, appauthor, None, roaming)
- else:
- path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
- r"""Return full path to the user-specific log dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "opinion" (boolean) can be False to disable the appending of
- "Logs" to the base app data dir for Windows, and "log" to the
- base cache dir for Unix. See discussion below.
-
- Typical user log directories are:
- Mac OS X: ~/Library/Logs/<AppName>
- Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
- Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
- Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
-
- On Windows the only suggestion in the MSDN docs is that local settings
- go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
- examples of what some windows apps use for a logs dir.)
-
- OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
- value for Windows and appends "log" to the user cache dir for Unix.
- This can be disabled with the `opinion=False` option.
- """
- if system == "darwin":
- path = os.path.join(
- os.path.expanduser('~/Library/Logs'),
- appname)
- elif system == "win32":
- path = user_data_dir(appname, appauthor, version)
- version = False
- if opinion:
- path = os.path.join(path, "Logs")
- else:
- path = user_cache_dir(appname, appauthor, version)
- version = False
- if opinion:
- path = os.path.join(path, "log")
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-class AppDirs(object):
- """Convenience wrapper for getting application dirs."""
- def __init__(self, appname=None, appauthor=None, version=None,
- roaming=False, multipath=False):
- self.appname = appname
- self.appauthor = appauthor
- self.version = version
- self.roaming = roaming
- self.multipath = multipath
-
- @property
- def user_data_dir(self):
- return user_data_dir(self.appname, self.appauthor,
- version=self.version, roaming=self.roaming)
-
- @property
- def site_data_dir(self):
- return site_data_dir(self.appname, self.appauthor,
- version=self.version, multipath=self.multipath)
-
- @property
- def user_config_dir(self):
- return user_config_dir(self.appname, self.appauthor,
- version=self.version, roaming=self.roaming)
-
- @property
- def site_config_dir(self):
- return site_config_dir(self.appname, self.appauthor,
- version=self.version, multipath=self.multipath)
-
- @property
- def user_cache_dir(self):
- return user_cache_dir(self.appname, self.appauthor,
- version=self.version)
-
- @property
- def user_state_dir(self):
- return user_state_dir(self.appname, self.appauthor,
- version=self.version)
-
- @property
- def user_log_dir(self):
- return user_log_dir(self.appname, self.appauthor,
- version=self.version)
-
-
-#---- internal support stuff
-
-def _get_win_folder_from_registry(csidl_name):
- """This is a fallback technique at best. I'm not sure if using the
- registry for this guarantees us the correct answer for all CSIDL_*
- names.
- """
- if PY3:
- import winreg as _winreg
- else:
- import _winreg
-
- shell_folder_name = {
- "CSIDL_APPDATA": "AppData",
- "CSIDL_COMMON_APPDATA": "Common AppData",
- "CSIDL_LOCAL_APPDATA": "Local AppData",
- }[csidl_name]
-
- key = _winreg.OpenKey(
- _winreg.HKEY_CURRENT_USER,
- r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
- )
- dir, type = _winreg.QueryValueEx(key, shell_folder_name)
- return dir
-
-
-def _get_win_folder_with_pywin32(csidl_name):
- from win32com.shell import shellcon, shell
- dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
- # Try to make this a unicode path because SHGetFolderPath does
- # not return unicode strings when there is unicode data in the
- # path.
- try:
- dir = unicode(dir)
-
- # Downgrade to short path name if have highbit chars. See
- # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
- has_high_char = False
- for c in dir:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- try:
- import win32api
- dir = win32api.GetShortPathName(dir)
- except ImportError:
- pass
- except UnicodeError:
- pass
- return dir
-
-
-def _get_win_folder_with_ctypes(csidl_name):
- import ctypes
-
- csidl_const = {
- "CSIDL_APPDATA": 26,
- "CSIDL_COMMON_APPDATA": 35,
- "CSIDL_LOCAL_APPDATA": 28,
- }[csidl_name]
-
- buf = ctypes.create_unicode_buffer(1024)
- ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
-
- # Downgrade to short path name if have highbit chars. See
- # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
- has_high_char = False
- for c in buf:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- buf2 = ctypes.create_unicode_buffer(1024)
- if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
- buf = buf2
-
- return buf.value
-
-def _get_win_folder_with_jna(csidl_name):
- import array
- from com.sun import jna
- from com.sun.jna.platform import win32
-
- buf_size = win32.WinDef.MAX_PATH * 2
- buf = array.zeros('c', buf_size)
- shell = win32.Shell32.INSTANCE
- shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
- dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
- # Downgrade to short path name if have highbit chars. See
- # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
- has_high_char = False
- for c in dir:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- buf = array.zeros('c', buf_size)
- kernel = win32.Kernel32.INSTANCE
- if kernel.GetShortPathName(dir, buf, buf_size):
- dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
- return dir
-
-if system == "win32":
- try:
- import win32com.shell
- _get_win_folder = _get_win_folder_with_pywin32
- except ImportError:
- try:
- from ctypes import windll
- _get_win_folder = _get_win_folder_with_ctypes
- except ImportError:
- try:
- import com.sun.jna
- _get_win_folder = _get_win_folder_with_jna
- except ImportError:
- _get_win_folder = _get_win_folder_from_registry
-
-
-#---- self test code
-
-if __name__ == "__main__":
- appname = "MyApp"
- appauthor = "MyCompany"
-
- props = ("user_data_dir",
- "user_config_dir",
- "user_cache_dir",
- "user_state_dir",
- "user_log_dir",
- "site_data_dir",
- "site_config_dir")
-
- print("-- app dirs %s --" % __version__)
-
- print("-- app dirs (with optional 'version')")
- dirs = AppDirs(appname, appauthor, version="1.0")
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (without optional 'version')")
- dirs = AppDirs(appname, appauthor)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (without optional 'appauthor')")
- dirs = AppDirs(appname)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (with disabled 'appauthor')")
- dirs = AppDirs(appname, appauthor=False)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
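A short sketch of the appdirs API deleted above, mirroring its self-test block; `AppDirs` simply forwards to the module-level functions. The app and author names are illustrative, and the printed paths depend on the platform:

```python
from appdirs import AppDirs  # or the vendored copy shown above

dirs = AppDirs("SuperApp", "Acme", version="1.0")
print(dirs.user_data_dir)    # e.g. ~/.local/share/SuperApp/1.0 on Linux
print(dirs.user_cache_dir)   # e.g. ~/.cache/SuperApp/1.0
print(dirs.site_config_dir)  # e.g. /etc/xdg/SuperApp/1.0
```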
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/__init__.py
deleted file mode 100644
index 34e3a995..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Read resources contained within a package."""
-
-from ._common import (
- as_file,
- files,
- Package,
-)
-
-from ._legacy import (
- contents,
- open_binary,
- read_binary,
- open_text,
- read_text,
- is_resource,
- path,
- Resource,
-)
-
-from .abc import ResourceReader
-
-
-__all__ = [
- 'Package',
- 'Resource',
- 'ResourceReader',
- 'as_file',
- 'contents',
- 'files',
- 'is_resource',
- 'open_binary',
- 'open_text',
- 'path',
- 'read_binary',
- 'read_text',
-]
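The `__all__` list above is the package's whole public surface; a sketch of the two entry points that are not deprecated, `files()` and `as_file()`, assuming a package named "mypkg" containing "data.txt" (both names illustrative):

```python
import importlib_resources

# files() returns a Traversable rooted at the package.
data = importlib_resources.files("mypkg").joinpath("data.txt").read_text()

# as_file() guarantees a real file-system path for the duration of the
# context, extracting to a temporary file when the package is zipped.
source = importlib_resources.files("mypkg") / "data.txt"
with importlib_resources.as_file(source) as path:
    print(path)
```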
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_adapters.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_adapters.py
deleted file mode 100644
index ea363d86..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_adapters.py
+++ /dev/null
@@ -1,170 +0,0 @@
-from contextlib import suppress
-from io import TextIOWrapper
-
-from . import abc
-
-
-class SpecLoaderAdapter:
- """
- Adapt a package spec to adapt the underlying loader.
- """
-
- def __init__(self, spec, adapter=lambda spec: spec.loader):
- self.spec = spec
- self.loader = adapter(spec)
-
- def __getattr__(self, name):
- return getattr(self.spec, name)
-
-
-class TraversableResourcesLoader:
- """
- Adapt a loader to provide TraversableResources.
- """
-
- def __init__(self, spec):
- self.spec = spec
-
- def get_resource_reader(self, name):
- return CompatibilityFiles(self.spec)._native()
-
-
-def _io_wrapper(file, mode='r', *args, **kwargs):
- if mode == 'r':
- return TextIOWrapper(file, *args, **kwargs)
- elif mode == 'rb':
- return file
- raise ValueError(
- "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
- )
-
-
-class CompatibilityFiles:
- """
- Adapter for an existing or non-existent resource reader
- to provide a compatibility .files().
- """
-
- class SpecPath(abc.Traversable):
- """
- Path tied to a module spec.
- Can be read and exposes the resource reader children.
- """
-
- def __init__(self, spec, reader):
- self._spec = spec
- self._reader = reader
-
- def iterdir(self):
- if not self._reader:
- return iter(())
- return iter(
- CompatibilityFiles.ChildPath(self._reader, path)
- for path in self._reader.contents()
- )
-
- def is_file(self):
- return False
-
- is_dir = is_file
-
- def joinpath(self, other):
- if not self._reader:
- return CompatibilityFiles.OrphanPath(other)
- return CompatibilityFiles.ChildPath(self._reader, other)
-
- @property
- def name(self):
- return self._spec.name
-
- def open(self, mode='r', *args, **kwargs):
- return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)
-
- class ChildPath(abc.Traversable):
- """
- Path tied to a resource reader child.
- Can be read but doesn't expose any meaningful children.
- """
-
- def __init__(self, reader, name):
- self._reader = reader
- self._name = name
-
- def iterdir(self):
- return iter(())
-
- def is_file(self):
- return self._reader.is_resource(self.name)
-
- def is_dir(self):
- return not self.is_file()
-
- def joinpath(self, other):
- return CompatibilityFiles.OrphanPath(self.name, other)
-
- @property
- def name(self):
- return self._name
-
- def open(self, mode='r', *args, **kwargs):
- return _io_wrapper(
- self._reader.open_resource(self.name), mode, *args, **kwargs
- )
-
- class OrphanPath(abc.Traversable):
- """
- Orphan path, not tied to a module spec or resource reader.
- Can't be read and doesn't expose any meaningful children.
- """
-
- def __init__(self, *path_parts):
- if len(path_parts) < 1:
- raise ValueError('Need at least one path part to construct a path')
- self._path = path_parts
-
- def iterdir(self):
- return iter(())
-
- def is_file(self):
- return False
-
- is_dir = is_file
-
- def joinpath(self, other):
- return CompatibilityFiles.OrphanPath(*self._path, other)
-
- @property
- def name(self):
- return self._path[-1]
-
- def open(self, mode='r', *args, **kwargs):
- raise FileNotFoundError("Can't open orphan path")
-
- def __init__(self, spec):
- self.spec = spec
-
- @property
- def _reader(self):
- with suppress(AttributeError):
- return self.spec.loader.get_resource_reader(self.spec.name)
-
- def _native(self):
- """
- Return the native reader if it supports files().
- """
- reader = self._reader
- return reader if hasattr(reader, 'files') else self
-
- def __getattr__(self, attr):
- return getattr(self._reader, attr)
-
- def files(self):
- return CompatibilityFiles.SpecPath(self.spec, self._reader)
-
-
-def wrap_spec(package):
- """
- Construct a package spec with traversable compatibility
- on the spec/loader/reader.
- """
- return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
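A sketch of how this adapter layer is driven: `wrap_spec()` swaps the spec's loader for `TraversableResourcesLoader`, whose reader falls back to a `CompatibilityFiles`-backed path when the native reader lacks `files()`. The module name is illustrative, and the private `_adapters` import assumes this vendored layout:

```python
import importlib
from importlib_resources import _adapters  # the vendored module above

spec = _adapters.wrap_spec(importlib.import_module("mypkg"))
reader = spec.loader.get_resource_reader(spec.name)

# Either the native reader (if it already supports files()) or a
# CompatibilityFiles-backed SpecPath comes back here.
print(reader.files().name)
```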
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_common.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_common.py
deleted file mode 100644
index a12e2c75..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_common.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import os
-import pathlib
-import tempfile
-import functools
-import contextlib
-import types
-import importlib
-
-from typing import Union, Optional
-from .abc import ResourceReader, Traversable
-
-from ._compat import wrap_spec
-
-Package = Union[types.ModuleType, str]
-
-
-def files(package):
- # type: (Package) -> Traversable
- """
- Get a Traversable resource from a package
- """
- return from_package(get_package(package))
-
-
-def get_resource_reader(package):
- # type: (types.ModuleType) -> Optional[ResourceReader]
- """
- Return the package's loader if it's a ResourceReader.
- """
- # We can't use
- # an issubclass() check here because apparently abc.'s __subclasscheck__()
- # hook wants to create a weak reference to the object, but
- # zipimport.zipimporter does not support weak references, resulting in a
- # TypeError. That seems terrible.
- spec = package.__spec__
- reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore
- if reader is None:
- return None
- return reader(spec.name) # type: ignore
-
-
-def resolve(cand):
- # type: (Package) -> types.ModuleType
- return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand)
-
-
-def get_package(package):
- # type: (Package) -> types.ModuleType
- """Take a package name or module object and return the module.
-
- Raise an exception if the resolved module is not a package.
- """
- resolved = resolve(package)
- if wrap_spec(resolved).submodule_search_locations is None:
- raise TypeError(f'{package!r} is not a package')
- return resolved
-
-
-def from_package(package):
- """
- Return a Traversable object for the given package.
-
- """
- spec = wrap_spec(package)
- reader = spec.loader.get_resource_reader(spec.name)
- return reader.files()
-
-
-@contextlib.contextmanager
-def _tempfile(reader, suffix=''):
- # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
- # blocks due to the need to close the temporary file to work on Windows
- # properly.
- fd, raw_path = tempfile.mkstemp(suffix=suffix)
- try:
- try:
- os.write(fd, reader())
- finally:
- os.close(fd)
- del reader
- yield pathlib.Path(raw_path)
- finally:
- try:
- os.remove(raw_path)
- except FileNotFoundError:
- pass
-
-
-@functools.singledispatch
-def as_file(path):
- """
- Given a Traversable object, return that object as a
- path on the local file system in a context manager.
- """
- return _tempfile(path.read_bytes, suffix=path.name)
-
-
-@as_file.register(pathlib.Path)
-@contextlib.contextmanager
-def _(path):
- """
- Degenerate behavior for pathlib.Path objects.
- """
- yield path
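A sketch of the `as_file()` single dispatch defined above: plain `pathlib.Path` objects take the degenerate branch and are yielded unchanged, while any other Traversable is copied out via `_tempfile()`. The private `_common` import assumes this vendored layout:

```python
import pathlib
from importlib_resources._common import as_file

# No temporary copy is made for a real path; the same object is yielded.
with as_file(pathlib.Path("README.md")) as p:
    print(p)   # README.md
```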
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_compat.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_compat.py
deleted file mode 100644
index cb9fc820..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_compat.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# flake8: noqa
-
-import abc
-import sys
-import pathlib
-from contextlib import suppress
-
-if sys.version_info >= (3, 10):
- from zipfile import Path as ZipPath # type: ignore
-else:
- from ..zipp import Path as ZipPath # type: ignore
-
-
-try:
- from typing import runtime_checkable # type: ignore
-except ImportError:
-
- def runtime_checkable(cls): # type: ignore
- return cls
-
-
-try:
- from typing import Protocol # type: ignore
-except ImportError:
- Protocol = abc.ABC # type: ignore
-
-
-class TraversableResourcesLoader:
- """
- Adapt loaders to provide TraversableResources and other
- compatibility.
-
- Used primarily for Python 3.9 and earlier where the native
- loaders do not yet implement TraversableResources.
- """
-
- def __init__(self, spec):
- self.spec = spec
-
- @property
- def path(self):
- return self.spec.origin
-
- def get_resource_reader(self, name):
- from . import readers, _adapters
-
- def _zip_reader(spec):
- with suppress(AttributeError):
- return readers.ZipReader(spec.loader, spec.name)
-
- def _namespace_reader(spec):
- with suppress(AttributeError, ValueError):
- return readers.NamespaceReader(spec.submodule_search_locations)
-
- def _available_reader(spec):
- with suppress(AttributeError):
- return spec.loader.get_resource_reader(spec.name)
-
- def _native_reader(spec):
- reader = _available_reader(spec)
- return reader if hasattr(reader, 'files') else None
-
- def _file_reader(spec):
- try:
- path = pathlib.Path(self.path)
- except TypeError:
- return None
- if path.exists():
- return readers.FileReader(self)
-
- return (
- # native reader if it supplies 'files'
- _native_reader(self.spec)
- or
- # local ZipReader if a zip module
- _zip_reader(self.spec)
- or
- # local NamespaceReader if a namespace module
- _namespace_reader(self.spec)
- or
- # local FileReader
- _file_reader(self.spec)
- # fallback - adapt the spec ResourceReader to TraversableReader
- or _adapters.CompatibilityFiles(self.spec)
- )
-
-
-def wrap_spec(package):
- """
- Construct a package spec with traversable compatibility
- on the spec/loader/reader.
-
- Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
- from above for older Python compatibility (<3.10).
- """
- from . import _adapters
-
- return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
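The reader-selection chain above can be exercised directly; for an ordinary on-disk package the `_file_reader` branch wins, and for a zipped one `_zip_reader` does. A sketch with an illustrative module name:

```python
import importlib
from importlib_resources._compat import wrap_spec  # vendored module above

spec = wrap_spec(importlib.import_module("mypkg"))
reader = spec.loader.get_resource_reader(spec.name)
print(type(reader).__name__)   # e.g. FileReader, ZipReader, NamespaceReader
```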
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_itertools.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_itertools.py
deleted file mode 100644
index cce05582..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_itertools.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from itertools import filterfalse
-
-from typing import (
- Callable,
- Iterable,
- Iterator,
- Optional,
- Set,
- TypeVar,
- Union,
-)
-
-# Type and type variable definitions
-_T = TypeVar('_T')
-_U = TypeVar('_U')
-
-
-def unique_everseen(
- iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
-) -> Iterator[_T]:
- "List unique elements, preserving order. Remember all elements ever seen."
- # unique_everseen('AAAABBBCCDAABBB') --> A B C D
- # unique_everseen('ABBCcAD', str.lower) --> A B C D
- seen: Set[Union[_T, _U]] = set()
- seen_add = seen.add
- if key is None:
- for element in filterfalse(seen.__contains__, iterable):
- seen_add(element)
- yield element
- else:
- for element in iterable:
- k = key(element)
- if k not in seen:
- seen_add(k)
- yield element
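A usage sketch for `unique_everseen()`, matching the doctest-style comments in its body:

```python
from importlib_resources._itertools import unique_everseen

print(list(unique_everseen("AAAABBBCCDAABBB")))      # ['A', 'B', 'C', 'D']
print(list(unique_everseen("ABBCcAD", str.lower)))   # ['A', 'B', 'C', 'D']
```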
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_legacy.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_legacy.py
deleted file mode 100644
index 1d5d3f1f..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/_legacy.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import functools
-import os
-import pathlib
-import types
-import warnings
-
-from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any
-
-from . import _common
-
-Package = Union[types.ModuleType, str]
-Resource = str
-
-
-def deprecated(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- warnings.warn(
- f"{func.__name__} is deprecated. Use files() instead. "
- "Refer to https://importlib-resources.readthedocs.io"
- "/en/latest/using.html#migrating-from-legacy for migration advice.",
- DeprecationWarning,
- stacklevel=2,
- )
- return func(*args, **kwargs)
-
- return wrapper
-
-
-def normalize_path(path):
- # type: (Any) -> str
- """Normalize a path by ensuring it is a string.
-
- If the resulting string contains path separators, an exception is raised.
- """
- str_path = str(path)
- parent, file_name = os.path.split(str_path)
- if parent:
- raise ValueError(f'{path!r} must be only a file name')
- return file_name
-
-
-@deprecated
-def open_binary(package: Package, resource: Resource) -> BinaryIO:
- """Return a file-like object opened for binary reading of the resource."""
- return (_common.files(package) / normalize_path(resource)).open('rb')
-
-
-@deprecated
-def read_binary(package: Package, resource: Resource) -> bytes:
- """Return the binary contents of the resource."""
- return (_common.files(package) / normalize_path(resource)).read_bytes()
-
-
-@deprecated
-def open_text(
- package: Package,
- resource: Resource,
- encoding: str = 'utf-8',
- errors: str = 'strict',
-) -> TextIO:
- """Return a file-like object opened for text reading of the resource."""
- return (_common.files(package) / normalize_path(resource)).open(
- 'r', encoding=encoding, errors=errors
- )
-
-
-@deprecated
-def read_text(
- package: Package,
- resource: Resource,
- encoding: str = 'utf-8',
- errors: str = 'strict',
-) -> str:
- """Return the decoded string of the resource.
-
- The decoding-related arguments have the same semantics as those of
- bytes.decode().
- """
- with open_text(package, resource, encoding, errors) as fp:
- return fp.read()
-
-
-@deprecated
-def contents(package: Package) -> Iterable[str]:
- """Return an iterable of entries in `package`.
-
- Note that not all entries are resources. Specifically, directories are
- not considered resources. Use `is_resource()` on each entry returned here
- to check if it is a resource or not.
- """
- return [path.name for path in _common.files(package).iterdir()]
-
-
-@deprecated
-def is_resource(package: Package, name: str) -> bool:
- """True if `name` is a resource inside `package`.
-
- Directories are *not* resources.
- """
- resource = normalize_path(name)
- return any(
- traversable.name == resource and traversable.is_file()
- for traversable in _common.files(package).iterdir()
- )
-
-
-@deprecated
-def path(
- package: Package,
- resource: Resource,
-) -> ContextManager[pathlib.Path]:
- """A context manager providing a file path object to the resource.
-
- If the resource does not already exist on its own on the file system,
- a temporary file will be created. If the file was created, the file
- will be deleted upon exiting the context manager (no exception is
- raised if the file was deleted prior to the context manager
- exiting).
- """
- return _common.as_file(_common.files(package) / normalize_path(resource))
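Each deprecated helper above reduces to an expression over `files()`, which is exactly the migration the warning text points to; a sketch with illustrative package and resource names:

```python
from importlib_resources import files, read_text  # read_text is deprecated

old = read_text("mypkg", "data.txt")   # emits DeprecationWarning
new = files("mypkg").joinpath("data.txt").read_text(encoding="utf-8")
assert old == new
```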
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/abc.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/abc.py
deleted file mode 100644
index d39dc1ad..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/abc.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import abc
-from typing import BinaryIO, Iterable, Text
-
-from ._compat import runtime_checkable, Protocol
-
-
-class ResourceReader(metaclass=abc.ABCMeta):
- """Abstract base class for loaders to provide resource reading support."""
-
- @abc.abstractmethod
- def open_resource(self, resource: Text) -> BinaryIO:
- """Return an opened, file-like object for binary reading.
-
- The 'resource' argument is expected to represent only a file name.
- If the resource cannot be found, FileNotFoundError is raised.
- """
- # This deliberately raises FileNotFoundError instead of
- # NotImplementedError so that if this method is accidentally called,
- # it'll still do the right thing.
- raise FileNotFoundError
-
- @abc.abstractmethod
- def resource_path(self, resource: Text) -> Text:
- """Return the file system path to the specified resource.
-
- The 'resource' argument is expected to represent only a file name.
- If the resource does not exist on the file system, raise
- FileNotFoundError.
- """
- # This deliberately raises FileNotFoundError instead of
- # NotImplementedError so that if this method is accidentally called,
- # it'll still do the right thing.
- raise FileNotFoundError
-
- @abc.abstractmethod
- def is_resource(self, path: Text) -> bool:
- """Return True if the named 'path' is a resource.
-
- Files are resources, directories are not.
- """
- raise FileNotFoundError
-
- @abc.abstractmethod
- def contents(self) -> Iterable[str]:
- """Return an iterable of entries in `package`."""
- raise FileNotFoundError
-
-
-@runtime_checkable
-class Traversable(Protocol):
- """
- An object with a subset of pathlib.Path methods suitable for
- traversing directories and opening files.
- """
-
- @abc.abstractmethod
- def iterdir(self):
- """
- Yield Traversable objects in self
- """
-
- def read_bytes(self):
- """
- Read contents of self as bytes
- """
- with self.open('rb') as strm:
- return strm.read()
-
- def read_text(self, encoding=None):
- """
- Read contents of self as text
- """
- with self.open(encoding=encoding) as strm:
- return strm.read()
-
- @abc.abstractmethod
- def is_dir(self) -> bool:
- """
- Return True if self is a directory
- """
-
- @abc.abstractmethod
- def is_file(self) -> bool:
- """
- Return True if self is a file
- """
-
- @abc.abstractmethod
- def joinpath(self, child):
- """
- Return Traversable child in self
- """
-
- def __truediv__(self, child):
- """
- Return Traversable child in self
- """
- return self.joinpath(child)
-
- @abc.abstractmethod
- def open(self, mode='r', *args, **kwargs):
- """
- mode may be 'r' or 'rb' to open as text or binary. Return a handle
- suitable for reading (same as pathlib.Path.open).
-
- When opening as text, accepts encoding parameters such as those
- accepted by io.TextIOWrapper.
- """
-
- @abc.abstractproperty
- def name(self) -> str:
- """
- The base name of this object without any parent references.
- """
-
-
-class TraversableResources(ResourceReader):
- """
- The required interface for providing traversable
- resources.
- """
-
- @abc.abstractmethod
- def files(self):
- """Return a Traversable object for the loaded package."""
-
- def open_resource(self, resource):
- return self.files().joinpath(resource).open('rb')
-
- def resource_path(self, resource):
- raise FileNotFoundError(resource)
-
- def is_resource(self, path):
- return self.files().joinpath(path).is_file()
-
- def contents(self):
- return (item.name for item in self.files().iterdir())
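Because `TraversableResources` derives every `ResourceReader` method from `files()`, a concrete reader only has to supply that one method. A minimal sketch backed by a plain directory; the class is hypothetical, and `pathlib.Path` satisfies the `Traversable` protocol:

```python
import pathlib
from importlib_resources.abc import TraversableResources

class DirReader(TraversableResources):
    """Hypothetical reader serving resources from a directory."""

    def __init__(self, root):
        self._root = pathlib.Path(root)

    def files(self):
        return self._root  # a pathlib.Path acts as the Traversable root

reader = DirReader(".")
print(list(reader.contents()))          # entry names in the directory
print(reader.is_resource("README.md"))  # True if that file exists
```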
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/readers.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/readers.py
deleted file mode 100644
index f1190ca4..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/readers.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import collections
-import pathlib
-import operator
-
-from . import abc
-
-from ._itertools import unique_everseen
-from ._compat import ZipPath
-
-
-def remove_duplicates(items):
- return iter(collections.OrderedDict.fromkeys(items))
-
-
-class FileReader(abc.TraversableResources):
- def __init__(self, loader):
- self.path = pathlib.Path(loader.path).parent
-
- def resource_path(self, resource):
- """
- Return the file system path to prevent
- `resources.path()` from creating a temporary
- copy.
- """
- return str(self.path.joinpath(resource))
-
- def files(self):
- return self.path
-
-
-class ZipReader(abc.TraversableResources):
- def __init__(self, loader, module):
- _, _, name = module.rpartition('.')
- self.prefix = loader.prefix.replace('\\', '/') + name + '/'
- self.archive = loader.archive
-
- def open_resource(self, resource):
- try:
- return super().open_resource(resource)
- except KeyError as exc:
- raise FileNotFoundError(exc.args[0])
-
- def is_resource(self, path):
- # workaround for `zipfile.Path.is_file` returning true
- # for non-existent paths.
- target = self.files().joinpath(path)
- return target.is_file() and target.exists()
-
- def files(self):
- return ZipPath(self.archive, self.prefix)
-
-
-class MultiplexedPath(abc.Traversable):
- """
- Given a series of Traversable objects, implement a merged
- version of the interface across all objects. Useful for
- namespace packages which may be multihomed at a single
- name.
- """
-
- def __init__(self, *paths):
- self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
- if not self._paths:
- message = 'MultiplexedPath must contain at least one path'
- raise FileNotFoundError(message)
- if not all(path.is_dir() for path in self._paths):
- raise NotADirectoryError('MultiplexedPath only supports directories')
-
- def iterdir(self):
- files = (file for path in self._paths for file in path.iterdir())
- return unique_everseen(files, key=operator.attrgetter('name'))
-
- def read_bytes(self):
- raise FileNotFoundError(f'{self} is not a file')
-
- def read_text(self, *args, **kwargs):
- raise FileNotFoundError(f'{self} is not a file')
-
- def is_dir(self):
- return True
-
- def is_file(self):
- return False
-
- def joinpath(self, child):
- # first try to find child in current paths
- for file in self.iterdir():
- if file.name == child:
- return file
- # if it does not exist, construct it with the first path
- return self._paths[0] / child
-
- __truediv__ = joinpath
-
- def open(self, *args, **kwargs):
- raise FileNotFoundError(f'{self} is not a file')
-
- @property
- def name(self):
- return self._paths[0].name
-
- def __repr__(self):
- paths = ', '.join(f"'{path}'" for path in self._paths)
- return f'MultiplexedPath({paths})'
-
-
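
The lookup rules above are easy to miss: `iterdir()` yields the children of
every member path with first-seen names winning, while `joinpath()` prefers an
existing child from any member and otherwise constructs the child against the
first path. A standalone sketch of those rules using plain `pathlib`
(illustrative code, not the vendored API):

```python
import pathlib
import tempfile

with tempfile.TemporaryDirectory() as a, tempfile.TemporaryDirectory() as b:
    pathlib.Path(a, 'one.txt').write_text('from a')
    pathlib.Path(b, 'two.txt').write_text('from b')
    paths = [pathlib.Path(a), pathlib.Path(b)]

    # iterdir(): merged children across all member paths
    names = {child.name for p in paths for child in p.iterdir()}
    assert names == {'one.txt', 'two.txt'}

    def joinpath(child):
        # prefer an existing child from any member path...
        for p in paths:
            if (p / child).exists():
                return p / child
        # ...otherwise construct the child against the first path
        return paths[0] / child

    assert joinpath('two.txt').read_text() == 'from b'
    assert joinpath('missing.txt').parent == paths[0]
```
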
-class NamespaceReader(abc.TraversableResources):
- def __init__(self, namespace_path):
- if 'NamespacePath' not in str(namespace_path):
- raise ValueError('Invalid path')
- self.path = MultiplexedPath(*list(namespace_path))
-
- def resource_path(self, resource):
- """
- Return the file system path to prevent
- `resources.path()` from creating a temporary
- copy.
- """
- return str(self.path.joinpath(resource))
-
- def files(self):
- return self.path
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/simple.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/simple.py
deleted file mode 100644
index da073cbd..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/importlib_resources/simple.py
+++ /dev/null
@@ -1,116 +0,0 @@
-"""
-Interface adapters for low-level readers.
-"""
-
-import abc
-import io
-import itertools
-from typing import BinaryIO, List
-
-from .abc import Traversable, TraversableResources
-
-
-class SimpleReader(abc.ABC):
- """
- The minimum, low-level interface required from a resource
- provider.
- """
-
- @abc.abstractproperty
- def package(self):
- # type: () -> str
- """
- The name of the package for which this reader loads resources.
- """
-
- @abc.abstractmethod
- def children(self):
- # type: () -> List['SimpleReader']
- """
- Obtain an iterable of SimpleReader for available
- child containers (e.g. directories).
- """
-
- @abc.abstractmethod
- def resources(self):
- # type: () -> List[str]
- """
- Obtain available named resources for this virtual package.
- """
-
- @abc.abstractmethod
- def open_binary(self, resource):
- # type: (str) -> BinaryIO
- """
- Obtain a File-like for a named resource.
- """
-
- @property
- def name(self):
- return self.package.split('.')[-1]
-
-
-class ResourceHandle(Traversable):
- """
- Handle to a named resource in a ResourceReader.
- """
-
- def __init__(self, parent, name):
- # type: (ResourceContainer, str) -> None
- self.parent = parent
- self.name = name # type: ignore
-
- def is_file(self):
- return True
-
- def is_dir(self):
- return False
-
- def open(self, mode='r', *args, **kwargs):
- stream = self.parent.reader.open_binary(self.name)
- if 'b' not in mode:
-            stream = io.TextIOWrapper(stream, *args, **kwargs)
- return stream
-
- def joinpath(self, name):
- raise RuntimeError("Cannot traverse into a resource")
-
-
-class ResourceContainer(Traversable):
- """
- Traversable container for a package's resources via its reader.
- """
-
- def __init__(self, reader):
- # type: (SimpleReader) -> None
- self.reader = reader
-
- def is_dir(self):
- return True
-
- def is_file(self):
- return False
-
- def iterdir(self):
-        files = (ResourceHandle(self, name) for name in self.reader.resources())
- dirs = map(ResourceContainer, self.reader.children())
- return itertools.chain(files, dirs)
-
- def open(self, *args, **kwargs):
- raise IsADirectoryError()
-
- def joinpath(self, name):
- return next(
- traversable for traversable in self.iterdir() if traversable.name == name
- )
-
-
-class TraversableReader(TraversableResources, SimpleReader):
- """
- A TraversableResources based on SimpleReader. Resource providers
- may derive from this class to provide the TraversableResources
- interface by supplying the SimpleReader interface.
- """
-
- def files(self):
- return ResourceContainer(self)
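
A provider therefore only supplies the four `SimpleReader` hooks and the
adapters above derive all traversal behaviour. A minimal sketch with a
hypothetical dict-backed provider (a standalone stand-in that mimics the
contract rather than importing the vendored base classes):

```python
import io


class DictReader:
    """Hypothetical flat provider: one virtual package, no child containers."""

    def __init__(self, package, data):
        self.package = package
        self._data = data  # resource name -> bytes

    def children(self):
        return []  # no subdirectories in this sketch

    def resources(self):
        return list(self._data)

    def open_binary(self, resource):
        return io.BytesIO(self._data[resource])

    @property
    def name(self):
        # same derivation as SimpleReader.name above
        return self.package.split('.')[-1]


reader = DictReader('pkg.data', {'greeting.txt': b'hello'})
assert reader.name == 'data'
assert reader.resources() == ['greeting.txt']
assert reader.open_binary('greeting.txt').read() == b'hello'
```
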
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/jaraco/context.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/jaraco/context.py
deleted file mode 100644
index 87a4e3dc..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/jaraco/context.py
+++ /dev/null
@@ -1,213 +0,0 @@
-import os
-import subprocess
-import contextlib
-import functools
-import tempfile
-import shutil
-import operator
-
-
-@contextlib.contextmanager
-def pushd(dir):
- orig = os.getcwd()
- os.chdir(dir)
- try:
- yield dir
- finally:
- os.chdir(orig)
-
-
-@contextlib.contextmanager
-def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
- """
- Get a tarball, extract it, change to that directory, yield, then
- clean up.
- `runner` is the function to invoke commands.
- `pushd` is a context manager for changing the directory.
- """
- if target_dir is None:
- target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
- if runner is None:
- runner = functools.partial(subprocess.check_call, shell=True)
-    # In the tar command, use --strip-components=1 to strip the first path
-    # element and -C to extract the files into {target_dir}. This ensures
-    # that we always know where the files were extracted.
- runner('mkdir {target_dir}'.format(**vars()))
- try:
- getter = 'wget {url} -O -'
- extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
- cmd = ' | '.join((getter, extract))
- runner(cmd.format(compression=infer_compression(url), **vars()))
- with pushd(target_dir):
- yield target_dir
- finally:
- runner('rm -Rf {target_dir}'.format(**vars()))
-
-
-def infer_compression(url):
- """
- Given a URL or filename, infer the compression code for tar.
- """
- # cheat and just assume it's the last two characters
- compression_indicator = url[-2:]
- mapping = dict(gz='z', bz='j', xz='J')
- # Assume 'z' (gzip) if no match
- return mapping.get(compression_indicator, 'z')
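
A quick sanity check of that mapping, assuming `infer_compression` from the
hunk above is in scope:

```python
assert infer_compression('project.tar.gz') == 'z'  # 'gz' -> gzip
assert infer_compression('project.tar.xz') == 'J'  # 'xz' -> xz
assert infer_compression('project.tbz') == 'j'     # 'bz' -> bzip2
assert infer_compression('project.tar') == 'z'     # no match: assume gzip
```
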
-
-
-@contextlib.contextmanager
-def temp_dir(remover=shutil.rmtree):
- """
- Create a temporary directory context. Pass a custom remover
- to override the removal behavior.
- """
- temp_dir = tempfile.mkdtemp()
- try:
- yield temp_dir
- finally:
- remover(temp_dir)
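
The two context managers compose naturally: `temp_dir` owns the lifetime of a
scratch directory and `pushd` scopes the working-directory change inside it. A
sketch, assuming both functions above are in scope:

```python
import os

with temp_dir() as scratch:
    with pushd(scratch):
        with open('notes.txt', 'w') as f:
            f.write('ephemeral')  # created relative to the scratch dir
    # back in the original directory, the file is inside scratch
    assert os.path.exists(os.path.join(scratch, 'notes.txt'))
assert not os.path.exists(scratch)  # removed on exit by temp_dir
```
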
-
-
-@contextlib.contextmanager
-def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
- """
- Check out the repo indicated by url.
-
- If dest_ctx is supplied, it should be a context manager
- to yield the target directory for the check out.
- """
- exe = 'git' if 'git' in url else 'hg'
- with dest_ctx() as repo_dir:
- cmd = [exe, 'clone', url, repo_dir]
- if branch:
- cmd.extend(['--branch', branch])
- devnull = open(os.path.devnull, 'w')
- stdout = devnull if quiet else None
- subprocess.check_call(cmd, stdout=stdout)
- yield repo_dir
-
-
-@contextlib.contextmanager
-def null():
- yield
-
-
-class ExceptionTrap:
- """
- A context manager that will catch certain exceptions and provide an
- indication they occurred.
-
- >>> with ExceptionTrap() as trap:
- ... raise Exception()
- >>> bool(trap)
- True
-
- >>> with ExceptionTrap() as trap:
- ... pass
- >>> bool(trap)
- False
-
- >>> with ExceptionTrap(ValueError) as trap:
- ... raise ValueError("1 + 1 is not 3")
- >>> bool(trap)
- True
-
- >>> with ExceptionTrap(ValueError) as trap:
- ... raise Exception()
- Traceback (most recent call last):
- ...
- Exception
-
- >>> bool(trap)
- False
- """
-
- exc_info = None, None, None
-
- def __init__(self, exceptions=(Exception,)):
- self.exceptions = exceptions
-
- def __enter__(self):
- return self
-
- @property
- def type(self):
- return self.exc_info[0]
-
- @property
- def value(self):
- return self.exc_info[1]
-
- @property
- def tb(self):
- return self.exc_info[2]
-
- def __exit__(self, *exc_info):
- type = exc_info[0]
- matches = type and issubclass(type, self.exceptions)
- if matches:
- self.exc_info = exc_info
- return matches
-
- def __bool__(self):
- return bool(self.type)
-
- def raises(self, func, *, _test=bool):
- """
- Wrap func and replace the result with the truth
- value of the trap (True if an exception occurred).
-
-        First, give the decorator an alias to support Python 3.8
-        syntax.
-
- >>> raises = ExceptionTrap(ValueError).raises
-
- Now decorate a function that always fails.
-
- >>> @raises
- ... def fail():
- ... raise ValueError('failed')
- >>> fail()
- True
- """
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- with ExceptionTrap(self.exceptions) as trap:
- func(*args, **kwargs)
- return _test(trap)
-
- return wrapper
-
- def passes(self, func):
- """
- Wrap func and replace the result with the truth
- value of the trap (True if no exception).
-
-        First, give the decorator an alias to support Python 3.8
-        syntax.
-
- >>> passes = ExceptionTrap(ValueError).passes
-
- Now decorate a function that always fails.
-
- >>> @passes
- ... def fail():
- ... raise ValueError('failed')
-
- >>> fail()
- False
- """
- return self.raises(func, _test=operator.not_)
-
-
-class suppress(contextlib.suppress, contextlib.ContextDecorator):
- """
- A version of contextlib.suppress with decorator support.
-
- >>> @suppress(KeyError)
- ... def key_error():
- ... {}['']
- >>> key_error()
- """
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/jaraco/functools.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/jaraco/functools.py
deleted file mode 100644
index a3fea3a1..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/jaraco/functools.py
+++ /dev/null
@@ -1,525 +0,0 @@
-import functools
-import time
-import inspect
-import collections
-import types
-import itertools
-
-from pkg_resources.extern import more_itertools
-
-from typing import Callable, TypeVar
-
-
-CallableT = TypeVar("CallableT", bound=Callable[..., object])
-
-
-def compose(*funcs):
- """
- Compose any number of unary functions into a single unary function.
-
- >>> import textwrap
- >>> expected = str.strip(textwrap.dedent(compose.__doc__))
- >>> strip_and_dedent = compose(str.strip, textwrap.dedent)
- >>> strip_and_dedent(compose.__doc__) == expected
- True
-
- Compose also allows the innermost function to take arbitrary arguments.
-
- >>> round_three = lambda x: round(x, ndigits=3)
- >>> f = compose(round_three, int.__truediv__)
- >>> [f(3*x, x+1) for x in range(1,10)]
- [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
- """
-
- def compose_two(f1, f2):
- return lambda *args, **kwargs: f1(f2(*args, **kwargs))
-
- return functools.reduce(compose_two, funcs)
-
-
-def method_caller(method_name, *args, **kwargs):
- """
- Return a function that will call a named method on the
- target object with optional positional and keyword
- arguments.
-
- >>> lower = method_caller('lower')
- >>> lower('MyString')
- 'mystring'
- """
-
- def call_method(target):
- func = getattr(target, method_name)
- return func(*args, **kwargs)
-
- return call_method
-
-
-def once(func):
- """
- Decorate func so it's only ever called the first time.
-
-    This decorator ensures that an expensive or non-idempotent function
-    runs at most once; subsequent calls return the saved result.
-
- >>> add_three = once(lambda a: a+3)
- >>> add_three(3)
- 6
- >>> add_three(9)
- 6
- >>> add_three('12')
- 6
-
- To reset the stored value, simply clear the property ``saved_result``.
-
- >>> del add_three.saved_result
- >>> add_three(9)
- 12
- >>> add_three(8)
- 12
-
- Or invoke 'reset()' on it.
-
- >>> add_three.reset()
- >>> add_three(-3)
- 0
- >>> add_three(0)
- 0
- """
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- if not hasattr(wrapper, 'saved_result'):
- wrapper.saved_result = func(*args, **kwargs)
- return wrapper.saved_result
-
- wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
- return wrapper
-
-
-def method_cache(
- method: CallableT,
- cache_wrapper: Callable[
- [CallableT], CallableT
- ] = functools.lru_cache(), # type: ignore[assignment]
-) -> CallableT:
- """
- Wrap lru_cache to support storing the cache data in the object instances.
-
- Abstracts the common paradigm where the method explicitly saves an
- underscore-prefixed protected property on first call and returns that
- subsequently.
-
- >>> class MyClass:
- ... calls = 0
- ...
- ... @method_cache
- ... def method(self, value):
- ... self.calls += 1
- ... return value
-
- >>> a = MyClass()
- >>> a.method(3)
- 3
- >>> for x in range(75):
- ... res = a.method(x)
- >>> a.calls
- 75
-
- Note that the apparent behavior will be exactly like that of lru_cache
- except that the cache is stored on each instance, so values in one
- instance will not flush values from another, and when an instance is
- deleted, so are the cached values for that instance.
-
- >>> b = MyClass()
- >>> for x in range(35):
- ... res = b.method(x)
- >>> b.calls
- 35
- >>> a.method(0)
- 0
- >>> a.calls
- 75
-
- Note that if method had been decorated with ``functools.lru_cache()``,
- a.calls would have been 76 (due to the cached value of 0 having been
- flushed by the 'b' instance).
-
- Clear the cache with ``.cache_clear()``
-
- >>> a.method.cache_clear()
-
- Same for a method that hasn't yet been called.
-
- >>> c = MyClass()
- >>> c.method.cache_clear()
-
- Another cache wrapper may be supplied:
-
- >>> cache = functools.lru_cache(maxsize=2)
- >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
- >>> a = MyClass()
- >>> a.method2()
- 3
-
- Caution - do not subsequently wrap the method with another decorator, such
- as ``@property``, which changes the semantics of the function.
-
- See also
- http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
- for another implementation and additional justification.
- """
-
- def wrapper(self: object, *args: object, **kwargs: object) -> object:
- # it's the first call, replace the method with a cached, bound method
- bound_method: CallableT = types.MethodType( # type: ignore[assignment]
- method, self
- )
- cached_method = cache_wrapper(bound_method)
- setattr(self, method.__name__, cached_method)
- return cached_method(*args, **kwargs)
-
- # Support cache clear even before cache has been created.
- wrapper.cache_clear = lambda: None # type: ignore[attr-defined]
-
- return ( # type: ignore[return-value]
- _special_method_cache(method, cache_wrapper) or wrapper
- )
-
-
-def _special_method_cache(method, cache_wrapper):
- """
- Because Python treats special methods differently, it's not
- possible to use instance attributes to implement the cached
- methods.
-
- Instead, install the wrapper method under a different name
- and return a simple proxy to that wrapper.
-
- https://github.com/jaraco/jaraco.functools/issues/5
- """
- name = method.__name__
- special_names = '__getattr__', '__getitem__'
- if name not in special_names:
- return
-
- wrapper_name = '__cached' + name
-
- def proxy(self, *args, **kwargs):
- if wrapper_name not in vars(self):
- bound = types.MethodType(method, self)
- cache = cache_wrapper(bound)
- setattr(self, wrapper_name, cache)
- else:
- cache = getattr(self, wrapper_name)
- return cache(*args, **kwargs)
-
- return proxy
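
The proxy matters because Python looks special methods up on the type, not
the instance, so the per-instance cached attribute that `method_cache`
normally installs would never be consulted. A sketch, assuming `method_cache`
from above is in scope:

```python
class Squares:
    calls = 0

    @method_cache
    def __getitem__(self, n):
        self.calls += 1
        return n * n


s = Squares()
assert s[4] == 16
assert s[4] == 16  # second lookup is served by the per-instance cache
assert s.calls == 1
```
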
-
-
-def apply(transform):
- """
- Decorate a function with a transform function that is
- invoked on results returned from the decorated function.
-
- >>> @apply(reversed)
- ... def get_numbers(start):
- ... "doc for get_numbers"
- ... return range(start, start+3)
- >>> list(get_numbers(4))
- [6, 5, 4]
- >>> get_numbers.__doc__
- 'doc for get_numbers'
- """
-
- def wrap(func):
- return functools.wraps(func)(compose(transform, func))
-
- return wrap
-
-
-def result_invoke(action):
- r"""
- Decorate a function with an action function that is
- invoked on the results returned from the decorated
- function (for its side-effect), then return the original
- result.
-
- >>> @result_invoke(print)
- ... def add_two(a, b):
- ... return a + b
- >>> x = add_two(2, 3)
- 5
- >>> x
- 5
- """
-
- def wrap(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- result = func(*args, **kwargs)
- action(result)
- return result
-
- return wrapper
-
- return wrap
-
-
-def call_aside(f, *args, **kwargs):
- """
- Call a function for its side effect after initialization.
-
- >>> @call_aside
- ... def func(): print("called")
- called
- >>> func()
- called
-
- Use functools.partial to pass parameters to the initial call
-
- >>> @functools.partial(call_aside, name='bingo')
- ... def func(name): print("called with", name)
- called with bingo
- """
- f(*args, **kwargs)
- return f
-
-
-class Throttler:
- """
- Rate-limit a function (or other callable)
- """
-
- def __init__(self, func, max_rate=float('Inf')):
- if isinstance(func, Throttler):
- func = func.func
- self.func = func
- self.max_rate = max_rate
- self.reset()
-
- def reset(self):
- self.last_called = 0
-
- def __call__(self, *args, **kwargs):
- self._wait()
- return self.func(*args, **kwargs)
-
- def _wait(self):
- "ensure at least 1/max_rate seconds from last call"
- elapsed = time.time() - self.last_called
- must_wait = 1 / self.max_rate - elapsed
- time.sleep(max(0, must_wait))
- self.last_called = time.time()
-
- def __get__(self, obj, type=None):
- return first_invoke(self._wait, functools.partial(self.func, obj))
-
-
-def first_invoke(func1, func2):
- """
- Return a function that when invoked will invoke func1 without
- any parameters (for its side-effect) and then invoke func2
- with whatever parameters were passed, returning its result.
- """
-
- def wrapper(*args, **kwargs):
- func1()
- return func2(*args, **kwargs)
-
- return wrapper
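
A small usage sketch for `first_invoke` (assuming it is in scope): run a side
effect, then delegate:

```python
def announce():
    print('about to add')

add = first_invoke(announce, lambda a, b: a + b)
assert add(2, 3) == 5  # prints 'about to add' before returning the sum
```
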
-
-
-def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
- """
- Given a callable func, trap the indicated exceptions
- for up to 'retries' times, invoking cleanup on the
- exception. On the final attempt, allow any exceptions
- to propagate.
- """
- attempts = itertools.count() if retries == float('inf') else range(retries)
- for attempt in attempts:
- try:
- return func()
- except trap:
- cleanup()
-
- return func()
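
Note the control flow: `retries` counts only the trapped attempts inside the
loop; the final call after the loop lets any exception propagate. A sketch,
assuming `retry_call` is in scope:

```python
attempts = []

def flaky():
    attempts.append(None)
    if len(attempts) < 3:
        raise OSError('transient failure')
    return 'ok'

assert retry_call(flaky, retries=2, trap=OSError) == 'ok'
assert len(attempts) == 3  # two trapped failures, then the final attempt
```
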
-
-
-def retry(*r_args, **r_kwargs):
- """
- Decorator wrapper for retry_call. Accepts arguments to retry_call
- except func and then returns a decorator for the decorated function.
-
- Ex:
-
- >>> @retry(retries=3)
- ... def my_func(a, b):
- ... "this is my funk"
- ... print(a, b)
- >>> my_func.__doc__
- 'this is my funk'
- """
-
- def decorate(func):
- @functools.wraps(func)
- def wrapper(*f_args, **f_kwargs):
- bound = functools.partial(func, *f_args, **f_kwargs)
- return retry_call(bound, *r_args, **r_kwargs)
-
- return wrapper
-
- return decorate
-
-
-def print_yielded(func):
- """
- Convert a generator into a function that prints all yielded elements
-
- >>> @print_yielded
- ... def x():
- ... yield 3; yield None
- >>> x()
- 3
- None
- """
- print_all = functools.partial(map, print)
- print_results = compose(more_itertools.consume, print_all, func)
- return functools.wraps(func)(print_results)
-
-
-def pass_none(func):
- """
- Wrap func so it's not called if its first param is None
-
- >>> print_text = pass_none(print)
- >>> print_text('text')
- text
- >>> print_text(None)
- """
-
- @functools.wraps(func)
- def wrapper(param, *args, **kwargs):
- if param is not None:
- return func(param, *args, **kwargs)
-
- return wrapper
-
-
-def assign_params(func, namespace):
- """
- Assign parameters from namespace where func solicits.
-
- >>> def func(x, y=3):
- ... print(x, y)
- >>> assigned = assign_params(func, dict(x=2, z=4))
- >>> assigned()
- 2 3
-
- The usual errors are raised if a function doesn't receive
- its required parameters:
-
- >>> assigned = assign_params(func, dict(y=3, z=4))
- >>> assigned()
- Traceback (most recent call last):
- TypeError: func() ...argument...
-
- It even works on methods:
-
- >>> class Handler:
- ... def meth(self, arg):
- ... print(arg)
- >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
- crystal
- """
- sig = inspect.signature(func)
- params = sig.parameters.keys()
- call_ns = {k: namespace[k] for k in params if k in namespace}
- return functools.partial(func, **call_ns)
-
-
-def save_method_args(method):
- """
- Wrap a method such that when it is called, the args and kwargs are
- saved on the method.
-
- >>> class MyClass:
- ... @save_method_args
- ... def method(self, a, b):
- ... print(a, b)
- >>> my_ob = MyClass()
- >>> my_ob.method(1, 2)
- 1 2
- >>> my_ob._saved_method.args
- (1, 2)
- >>> my_ob._saved_method.kwargs
- {}
- >>> my_ob.method(a=3, b='foo')
- 3 foo
- >>> my_ob._saved_method.args
- ()
- >>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
- True
-
-    The arguments are stored on the instance, allowing
-    different instances to save different args.
-
- >>> your_ob = MyClass()
- >>> your_ob.method({str('x'): 3}, b=[4])
- {'x': 3} [4]
- >>> your_ob._saved_method.args
- ({'x': 3},)
- >>> my_ob._saved_method.args
- ()
- """
- args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
-
- @functools.wraps(method)
- def wrapper(self, *args, **kwargs):
- attr_name = '_saved_' + method.__name__
- attr = args_and_kwargs(args, kwargs)
- setattr(self, attr_name, attr)
- return method(self, *args, **kwargs)
-
- return wrapper
-
-
-def except_(*exceptions, replace=None, use=None):
- """
- Replace the indicated exceptions, if raised, with the indicated
- literal replacement or evaluated expression (if present).
-
- >>> safe_int = except_(ValueError)(int)
- >>> safe_int('five')
- >>> safe_int('5')
- 5
-
- Specify a literal replacement with ``replace``.
-
- >>> safe_int_r = except_(ValueError, replace=0)(int)
- >>> safe_int_r('five')
- 0
-
- Provide an expression to ``use`` to pass through particular parameters.
-
- >>> safe_int_pt = except_(ValueError, use='args[0]')(int)
- >>> safe_int_pt('five')
- 'five'
-
- """
-
- def decorate(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- try:
- return func(*args, **kwargs)
- except exceptions:
- try:
- return eval(use)
- except TypeError:
- return replace
-
- return wrapper
-
- return decorate
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/jaraco/text/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/jaraco/text/__init__.py
deleted file mode 100644
index c466378c..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/jaraco/text/__init__.py
+++ /dev/null
@@ -1,599 +0,0 @@
-import re
-import itertools
-import textwrap
-import functools
-
-try:
- from importlib.resources import files # type: ignore
-except ImportError: # pragma: nocover
- from pkg_resources.extern.importlib_resources import files # type: ignore
-
-from pkg_resources.extern.jaraco.functools import compose, method_cache
-from pkg_resources.extern.jaraco.context import ExceptionTrap
-
-
-def substitution(old, new):
- """
- Return a function that will perform a substitution on a string
- """
- return lambda s: s.replace(old, new)
-
-
-def multi_substitution(*substitutions):
- """
- Take a sequence of pairs specifying substitutions, and create
- a function that performs those substitutions.
-
- >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
- 'baz'
- """
- substitutions = itertools.starmap(substitution, substitutions)
- # compose function applies last function first, so reverse the
- # substitutions to get the expected order.
- substitutions = reversed(tuple(substitutions))
- return compose(*substitutions)
-
-
-class FoldedCase(str):
- """
- A case insensitive string class; behaves just like str
- except compares equal when the only variation is case.
-
- >>> s = FoldedCase('hello world')
-
- >>> s == 'Hello World'
- True
-
- >>> 'Hello World' == s
- True
-
- >>> s != 'Hello World'
- False
-
- >>> s.index('O')
- 4
-
- >>> s.split('O')
- ['hell', ' w', 'rld']
-
- >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
- ['alpha', 'Beta', 'GAMMA']
-
- Sequence membership is straightforward.
-
- >>> "Hello World" in [s]
- True
- >>> s in ["Hello World"]
- True
-
- You may test for set inclusion, but candidate and elements
- must both be folded.
-
- >>> FoldedCase("Hello World") in {s}
- True
- >>> s in {FoldedCase("Hello World")}
- True
-
- String inclusion works as long as the FoldedCase object
- is on the right.
-
- >>> "hello" in FoldedCase("Hello World")
- True
-
- But not if the FoldedCase object is on the left:
-
- >>> FoldedCase('hello') in 'Hello World'
- False
-
- In that case, use ``in_``:
-
- >>> FoldedCase('hello').in_('Hello World')
- True
-
- >>> FoldedCase('hello') > FoldedCase('Hello')
- False
- """
-
- def __lt__(self, other):
- return self.lower() < other.lower()
-
- def __gt__(self, other):
- return self.lower() > other.lower()
-
- def __eq__(self, other):
- return self.lower() == other.lower()
-
- def __ne__(self, other):
- return self.lower() != other.lower()
-
- def __hash__(self):
- return hash(self.lower())
-
- def __contains__(self, other):
- return super().lower().__contains__(other.lower())
-
- def in_(self, other):
- "Does self appear in other?"
- return self in FoldedCase(other)
-
- # cache lower since it's likely to be called frequently.
- @method_cache
- def lower(self):
- return super().lower()
-
- def index(self, sub):
- return self.lower().index(sub.lower())
-
- def split(self, splitter=' ', maxsplit=0):
- pattern = re.compile(re.escape(splitter), re.I)
- return pattern.split(self, maxsplit)
-
-
-# Python 3.8 compatibility
-_unicode_trap = ExceptionTrap(UnicodeDecodeError)
-
-
-@_unicode_trap.passes
-def is_decodable(value):
- r"""
- Return True if the supplied value is decodable (using the default
- encoding).
-
- >>> is_decodable(b'\xff')
- False
- >>> is_decodable(b'\x32')
- True
- """
- value.decode()
-
-
-def is_binary(value):
- r"""
- Return True if the value appears to be binary (that is, it's a byte
- string and isn't decodable).
-
- >>> is_binary(b'\xff')
- True
- >>> is_binary('\xff')
- False
- """
- return isinstance(value, bytes) and not is_decodable(value)
-
-
-def trim(s):
- r"""
- Trim something like a docstring to remove the whitespace that
- is common due to indentation and formatting.
-
- >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
- 'foo = bar\n\tbar = baz'
- """
- return textwrap.dedent(s).strip()
-
-
-def wrap(s):
- """
- Wrap lines of text, retaining existing newlines as
- paragraph markers.
-
- >>> print(wrap(lorem_ipsum))
- Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
- eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
- minim veniam, quis nostrud exercitation ullamco laboris nisi ut
- aliquip ex ea commodo consequat. Duis aute irure dolor in
- reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
- pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
- culpa qui officia deserunt mollit anim id est laborum.
-
- Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
- varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
- magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
- gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
- risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
- eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
- fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
- a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
- neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
- sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
- nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
- quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
- molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
- """
- paragraphs = s.splitlines()
- wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
- return '\n\n'.join(wrapped)
-
-
-def unwrap(s):
- r"""
- Given a multi-line string, return an unwrapped version.
-
- >>> wrapped = wrap(lorem_ipsum)
- >>> wrapped.count('\n')
- 20
- >>> unwrapped = unwrap(wrapped)
- >>> unwrapped.count('\n')
- 1
- >>> print(unwrapped)
- Lorem ipsum dolor sit amet, consectetur adipiscing ...
- Curabitur pretium tincidunt lacus. Nulla gravida orci ...
-
- """
- paragraphs = re.split(r'\n\n+', s)
- cleaned = (para.replace('\n', ' ') for para in paragraphs)
- return '\n'.join(cleaned)
-
-
-lorem_ipsum: str = files(__name__).joinpath('Lorem ipsum.txt').read_text()
-
-
-class Splitter(object):
- """object that will split a string with the given arguments for each call
-
- >>> s = Splitter(',')
- >>> s('hello, world, this is your, master calling')
- ['hello', ' world', ' this is your', ' master calling']
- """
-
- def __init__(self, *args):
- self.args = args
-
- def __call__(self, s):
- return s.split(*self.args)
-
-
-def indent(string, prefix=' ' * 4):
- """
- >>> indent('foo')
- ' foo'
- """
- return prefix + string
-
-
-class WordSet(tuple):
- """
- Given an identifier, return the words that identifier represents,
- whether in camel case, underscore-separated, etc.
-
- >>> WordSet.parse("camelCase")
- ('camel', 'Case')
-
- >>> WordSet.parse("under_sep")
- ('under', 'sep')
-
- Acronyms should be retained
-
- >>> WordSet.parse("firstSNL")
- ('first', 'SNL')
-
- >>> WordSet.parse("you_and_I")
- ('you', 'and', 'I')
-
- >>> WordSet.parse("A simple test")
- ('A', 'simple', 'test')
-
- Multiple caps should not interfere with the first cap of another word.
-
- >>> WordSet.parse("myABCClass")
- ('my', 'ABC', 'Class')
-
- The result is a WordSet, so you can get the form you need.
-
- >>> WordSet.parse("myABCClass").underscore_separated()
- 'my_ABC_Class'
-
- >>> WordSet.parse('a-command').camel_case()
- 'ACommand'
-
- >>> WordSet.parse('someIdentifier').lowered().space_separated()
- 'some identifier'
-
- Slices of the result should return another WordSet.
-
- >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
- 'out_of_context'
-
- >>> WordSet.from_class_name(WordSet()).lowered().space_separated()
- 'word set'
-
- >>> example = WordSet.parse('figured it out')
- >>> example.headless_camel_case()
- 'figuredItOut'
- >>> example.dash_separated()
- 'figured-it-out'
-
- """
-
- _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
-
- def capitalized(self):
- return WordSet(word.capitalize() for word in self)
-
- def lowered(self):
- return WordSet(word.lower() for word in self)
-
- def camel_case(self):
- return ''.join(self.capitalized())
-
- def headless_camel_case(self):
- words = iter(self)
- first = next(words).lower()
- new_words = itertools.chain((first,), WordSet(words).camel_case())
- return ''.join(new_words)
-
- def underscore_separated(self):
- return '_'.join(self)
-
- def dash_separated(self):
- return '-'.join(self)
-
- def space_separated(self):
- return ' '.join(self)
-
- def trim_right(self, item):
- """
- Remove the item from the end of the set.
-
- >>> WordSet.parse('foo bar').trim_right('foo')
- ('foo', 'bar')
- >>> WordSet.parse('foo bar').trim_right('bar')
- ('foo',)
- >>> WordSet.parse('').trim_right('bar')
- ()
- """
- return self[:-1] if self and self[-1] == item else self
-
- def trim_left(self, item):
- """
- Remove the item from the beginning of the set.
-
- >>> WordSet.parse('foo bar').trim_left('foo')
- ('bar',)
- >>> WordSet.parse('foo bar').trim_left('bar')
- ('foo', 'bar')
- >>> WordSet.parse('').trim_left('bar')
- ()
- """
- return self[1:] if self and self[0] == item else self
-
- def trim(self, item):
- """
- >>> WordSet.parse('foo bar').trim('foo')
- ('bar',)
- """
- return self.trim_left(item).trim_right(item)
-
- def __getitem__(self, item):
- result = super(WordSet, self).__getitem__(item)
- if isinstance(item, slice):
- result = WordSet(result)
- return result
-
- @classmethod
- def parse(cls, identifier):
- matches = cls._pattern.finditer(identifier)
- return WordSet(match.group(0) for match in matches)
-
- @classmethod
- def from_class_name(cls, subject):
- return cls.parse(subject.__class__.__name__)
-
-
-# for backward compatibility
-words = WordSet.parse
-
-
-def simple_html_strip(s):
- r"""
- Remove HTML from the string `s`.
-
- >>> str(simple_html_strip(''))
- ''
-
-    >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
-    A stormy day in paradise
-
-    >>> print(simple_html_strip('Somebody <em>tell the truth</em>.'))
-    Somebody tell the truth.
-
-    >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
-    What about
-    multiple lines?
-    """
-    html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
- texts = (match.group(3) or '' for match in html_stripper.finditer(s))
- return ''.join(texts)
-
-
-class SeparatedValues(str):
- """
- A string separated by a separator. Overrides __iter__ for getting
- the values.
-
- >>> list(SeparatedValues('a,b,c'))
- ['a', 'b', 'c']
-
- Whitespace is stripped and empty values are discarded.
-
- >>> list(SeparatedValues(' a, b , c, '))
- ['a', 'b', 'c']
- """
-
- separator = ','
-
- def __iter__(self):
- parts = self.split(self.separator)
- return filter(None, (part.strip() for part in parts))
-
-
-class Stripper:
- r"""
- Given a series of lines, find the common prefix and strip it from them.
-
- >>> lines = [
- ... 'abcdefg\n',
- ... 'abc\n',
- ... 'abcde\n',
- ... ]
- >>> res = Stripper.strip_prefix(lines)
- >>> res.prefix
- 'abc'
- >>> list(res.lines)
- ['defg\n', '\n', 'de\n']
-
- If no prefix is common, nothing should be stripped.
-
- >>> lines = [
- ... 'abcd\n',
- ... '1234\n',
- ... ]
- >>> res = Stripper.strip_prefix(lines)
-    >>> res.prefix
-    ''
- >>> list(res.lines)
- ['abcd\n', '1234\n']
- """
-
- def __init__(self, prefix, lines):
- self.prefix = prefix
- self.lines = map(self, lines)
-
- @classmethod
- def strip_prefix(cls, lines):
- prefix_lines, lines = itertools.tee(lines)
- prefix = functools.reduce(cls.common_prefix, prefix_lines)
- return cls(prefix, lines)
-
- def __call__(self, line):
- if not self.prefix:
- return line
- null, prefix, rest = line.partition(self.prefix)
- return rest
-
- @staticmethod
- def common_prefix(s1, s2):
- """
- Return the common prefix of two lines.
- """
- index = min(len(s1), len(s2))
- while s1[:index] != s2[:index]:
- index -= 1
- return s1[:index]
-
-
-def remove_prefix(text, prefix):
- """
- Remove the prefix from the text if it exists.
-
- >>> remove_prefix('underwhelming performance', 'underwhelming ')
- 'performance'
-
- >>> remove_prefix('something special', 'sample')
- 'something special'
- """
- null, prefix, rest = text.rpartition(prefix)
- return rest
-
-
-def remove_suffix(text, suffix):
- """
- Remove the suffix from the text if it exists.
-
- >>> remove_suffix('name.git', '.git')
- 'name'
-
- >>> remove_suffix('something special', 'sample')
- 'something special'
- """
- rest, suffix, null = text.partition(suffix)
- return rest
-
-
-def normalize_newlines(text):
- r"""
- Replace alternate newlines with the canonical newline.
-
- >>> normalize_newlines('Lorem Ipsum\u2029')
- 'Lorem Ipsum\n'
- >>> normalize_newlines('Lorem Ipsum\r\n')
- 'Lorem Ipsum\n'
- >>> normalize_newlines('Lorem Ipsum\x85')
- 'Lorem Ipsum\n'
- """
- newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
- pattern = '|'.join(newlines)
- return re.sub(pattern, '\n', text)
-
-
-def _nonblank(str):
- return str and not str.startswith('#')
-
-
-@functools.singledispatch
-def yield_lines(iterable):
- r"""
- Yield valid lines of a string or iterable.
-
- >>> list(yield_lines(''))
- []
- >>> list(yield_lines(['foo', 'bar']))
- ['foo', 'bar']
- >>> list(yield_lines('foo\nbar'))
- ['foo', 'bar']
- >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
- ['foo', 'baz #comment']
- >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
- ['foo', 'bar', 'baz', 'bing']
- """
- return itertools.chain.from_iterable(map(yield_lines, iterable))
-
-
-@yield_lines.register(str)
-def _(text):
- return filter(_nonblank, map(str.strip, text.splitlines()))
-
-
-def drop_comment(line):
- """
- Drop comments.
-
- >>> drop_comment('foo # bar')
- 'foo'
-
- A hash without a space may be in a URL.
-
- >>> drop_comment('http://example.com/foo#bar')
- 'http://example.com/foo#bar'
- """
- return line.partition(' #')[0]
-
-
-def join_continuation(lines):
- r"""
- Join lines continued by a trailing backslash.
-
- >>> list(join_continuation(['foo \\', 'bar', 'baz']))
- ['foobar', 'baz']
- >>> list(join_continuation(['foo \\', 'bar', 'baz']))
- ['foobar', 'baz']
- >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
- ['foobarbaz']
-
- Not sure why, but...
-    The character preceding the backslash is also elided.
-
- >>> list(join_continuation(['goo\\', 'dly']))
- ['godly']
-
- A terrible idea, but...
- If no line is available to continue, suppress the lines.
-
- >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
- ['foo']
- """
- lines = iter(lines)
- for item in lines:
- while item.endswith('\\'):
- try:
- item = item[:-2].strip() + next(lines)
- except StopIteration:
- return
- yield item
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/more_itertools/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/more_itertools/__init__.py
deleted file mode 100644
index ea38bef1..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/more_itertools/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .more import * # noqa
-from .recipes import * # noqa
-
-__version__ = '8.12.0'
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/more_itertools/more.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/more_itertools/more.py
deleted file mode 100644
index 6b6a5cab..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/more_itertools/more.py
+++ /dev/null
@@ -1,4316 +0,0 @@
-import warnings
-
-from collections import Counter, defaultdict, deque, abc
-from collections.abc import Sequence
-from functools import partial, reduce, wraps
-from heapq import merge, heapify, heapreplace, heappop
-from itertools import (
- chain,
- compress,
- count,
- cycle,
- dropwhile,
- groupby,
- islice,
- repeat,
- starmap,
- takewhile,
- tee,
- zip_longest,
-)
-from math import exp, factorial, floor, log
-from queue import Empty, Queue
-from random import random, randrange, uniform
-from operator import itemgetter, mul, sub, gt, lt, ge, le
-from sys import hexversion, maxsize
-from time import monotonic
-
-from .recipes import (
- consume,
- flatten,
- pairwise,
- powerset,
- take,
- unique_everseen,
-)
-
-__all__ = [
- 'AbortThread',
- 'SequenceView',
- 'UnequalIterablesError',
- 'adjacent',
- 'all_unique',
- 'always_iterable',
- 'always_reversible',
- 'bucket',
- 'callback_iter',
- 'chunked',
- 'chunked_even',
- 'circular_shifts',
- 'collapse',
- 'collate',
- 'combination_index',
- 'consecutive_groups',
- 'consumer',
- 'count_cycle',
- 'countable',
- 'difference',
- 'distinct_combinations',
- 'distinct_permutations',
- 'distribute',
- 'divide',
- 'duplicates_everseen',
- 'duplicates_justseen',
- 'exactly_n',
- 'filter_except',
- 'first',
- 'groupby_transform',
- 'ichunked',
- 'ilen',
- 'interleave',
- 'interleave_evenly',
- 'interleave_longest',
- 'intersperse',
- 'is_sorted',
- 'islice_extended',
- 'iterate',
- 'last',
- 'locate',
- 'lstrip',
- 'make_decorator',
- 'map_except',
- 'map_if',
- 'map_reduce',
- 'mark_ends',
- 'minmax',
- 'nth_or_last',
- 'nth_permutation',
- 'nth_product',
- 'numeric_range',
- 'one',
- 'only',
- 'padded',
- 'partitions',
- 'peekable',
- 'permutation_index',
- 'product_index',
- 'raise_',
- 'repeat_each',
- 'repeat_last',
- 'replace',
- 'rlocate',
- 'rstrip',
- 'run_length',
- 'sample',
- 'seekable',
- 'set_partitions',
- 'side_effect',
- 'sliced',
- 'sort_together',
- 'split_after',
- 'split_at',
- 'split_before',
- 'split_into',
- 'split_when',
- 'spy',
- 'stagger',
- 'strip',
- 'strictly_n',
- 'substrings',
- 'substrings_indexes',
- 'time_limited',
- 'unique_in_window',
- 'unique_to_each',
- 'unzip',
- 'value_chain',
- 'windowed',
- 'windowed_complete',
- 'with_iter',
- 'zip_broadcast',
- 'zip_equal',
- 'zip_offset',
-]
-
-
-_marker = object()
-
-
-def chunked(iterable, n, strict=False):
- """Break *iterable* into lists of length *n*:
-
- >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
- [[1, 2, 3], [4, 5, 6]]
-
-    By default, the last yielded list will have fewer than *n* elements
- if the length of *iterable* is not divisible by *n*:
-
- >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
- [[1, 2, 3], [4, 5, 6], [7, 8]]
-
- To use a fill-in value instead, see the :func:`grouper` recipe.
-
- If the length of *iterable* is not divisible by *n* and *strict* is
- ``True``, then ``ValueError`` will be raised before the last
- list is yielded.
-
- """
- iterator = iter(partial(take, n, iter(iterable)), [])
- if strict:
- if n is None:
- raise ValueError('n must not be None when using strict mode.')
-
- def ret():
- for chunk in iterator:
- if len(chunk) != n:
- raise ValueError('iterable is not divisible by n.')
- yield chunk
-
- return iter(ret())
- else:
- return iterator
-
-
-def first(iterable, default=_marker):
- """Return the first item of *iterable*, or *default* if *iterable* is
- empty.
-
- >>> first([0, 1, 2, 3])
- 0
- >>> first([], 'some default')
- 'some default'
-
- If *default* is not provided and there are no items in the iterable,
- raise ``ValueError``.
-
- :func:`first` is useful when you have a generator of expensive-to-retrieve
- values and want any arbitrary one. It is marginally shorter than
- ``next(iter(iterable), default)``.
-
- """
- try:
- return next(iter(iterable))
- except StopIteration as e:
- if default is _marker:
- raise ValueError(
- 'first() was called on an empty iterable, and no '
- 'default value was provided.'
- ) from e
- return default
-
-
-def last(iterable, default=_marker):
- """Return the last item of *iterable*, or *default* if *iterable* is
- empty.
-
- >>> last([0, 1, 2, 3])
- 3
- >>> last([], 'some default')
- 'some default'
-
- If *default* is not provided and there are no items in the iterable,
- raise ``ValueError``.
- """
- try:
- if isinstance(iterable, Sequence):
- return iterable[-1]
- # Work around https://bugs.python.org/issue38525
- elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
- return next(reversed(iterable))
- else:
- return deque(iterable, maxlen=1)[-1]
- except (IndexError, TypeError, StopIteration):
- if default is _marker:
- raise ValueError(
- 'last() was called on an empty iterable, and no default was '
- 'provided.'
- )
- return default
-
-
-def nth_or_last(iterable, n, default=_marker):
- """Return the nth or the last item of *iterable*,
- or *default* if *iterable* is empty.
-
- >>> nth_or_last([0, 1, 2, 3], 2)
- 2
- >>> nth_or_last([0, 1], 2)
- 1
- >>> nth_or_last([], 0, 'some default')
- 'some default'
-
- If *default* is not provided and there are no items in the iterable,
- raise ``ValueError``.
- """
- return last(islice(iterable, n + 1), default=default)
-
-
-class peekable:
- """Wrap an iterator to allow lookahead and prepending elements.
-
- Call :meth:`peek` on the result to get the value that will be returned
- by :func:`next`. This won't advance the iterator:
-
- >>> p = peekable(['a', 'b'])
- >>> p.peek()
- 'a'
- >>> next(p)
- 'a'
-
- Pass :meth:`peek` a default value to return that instead of raising
- ``StopIteration`` when the iterator is exhausted.
-
- >>> p = peekable([])
- >>> p.peek('hi')
- 'hi'
-
- peekables also offer a :meth:`prepend` method, which "inserts" items
- at the head of the iterable:
-
- >>> p = peekable([1, 2, 3])
- >>> p.prepend(10, 11, 12)
- >>> next(p)
- 10
- >>> p.peek()
- 11
- >>> list(p)
- [11, 12, 1, 2, 3]
-
- peekables can be indexed. Index 0 is the item that will be returned by
-    :func:`next`, index 1 is the item after that, and so on.
-    The values up to the given index will be cached.
-
- >>> p = peekable(['a', 'b', 'c', 'd'])
- >>> p[0]
- 'a'
- >>> p[1]
- 'b'
- >>> next(p)
- 'a'
-
- Negative indexes are supported, but be aware that they will cache the
- remaining items in the source iterator, which may require significant
- storage.
-
- To check whether a peekable is exhausted, check its truth value:
-
- >>> p = peekable(['a', 'b'])
- >>> if p: # peekable has items
- ... list(p)
- ['a', 'b']
- >>> if not p: # peekable is exhausted
- ... list(p)
- []
-
- """
-
- def __init__(self, iterable):
- self._it = iter(iterable)
- self._cache = deque()
-
- def __iter__(self):
- return self
-
- def __bool__(self):
- try:
- self.peek()
- except StopIteration:
- return False
- return True
-
- def peek(self, default=_marker):
- """Return the item that will be next returned from ``next()``.
-
- Return ``default`` if there are no items left. If ``default`` is not
- provided, raise ``StopIteration``.
-
- """
- if not self._cache:
- try:
- self._cache.append(next(self._it))
- except StopIteration:
- if default is _marker:
- raise
- return default
- return self._cache[0]
-
- def prepend(self, *items):
- """Stack up items to be the next ones returned from ``next()`` or
- ``self.peek()``. The items will be returned in
- first in, first out order::
-
- >>> p = peekable([1, 2, 3])
- >>> p.prepend(10, 11, 12)
- >>> next(p)
- 10
- >>> list(p)
- [11, 12, 1, 2, 3]
-
- It is possible, by prepending items, to "resurrect" a peekable that
- previously raised ``StopIteration``.
-
- >>> p = peekable([])
- >>> next(p)
- Traceback (most recent call last):
- ...
- StopIteration
- >>> p.prepend(1)
- >>> next(p)
- 1
- >>> next(p)
- Traceback (most recent call last):
- ...
- StopIteration
-
- """
- self._cache.extendleft(reversed(items))
-
- def __next__(self):
- if self._cache:
- return self._cache.popleft()
-
- return next(self._it)
-
- def _get_slice(self, index):
- # Normalize the slice's arguments
- step = 1 if (index.step is None) else index.step
- if step > 0:
- start = 0 if (index.start is None) else index.start
- stop = maxsize if (index.stop is None) else index.stop
- elif step < 0:
- start = -1 if (index.start is None) else index.start
- stop = (-maxsize - 1) if (index.stop is None) else index.stop
- else:
- raise ValueError('slice step cannot be zero')
-
- # If either the start or stop index is negative, we'll need to cache
- # the rest of the iterable in order to slice from the right side.
- if (start < 0) or (stop < 0):
- self._cache.extend(self._it)
- # Otherwise we'll need to find the rightmost index and cache to that
- # point.
- else:
- n = min(max(start, stop) + 1, maxsize)
- cache_len = len(self._cache)
- if n >= cache_len:
- self._cache.extend(islice(self._it, n - cache_len))
-
- return list(self._cache)[index]
-
- def __getitem__(self, index):
- if isinstance(index, slice):
- return self._get_slice(index)
-
- cache_len = len(self._cache)
- if index < 0:
- self._cache.extend(self._it)
- elif index >= cache_len:
- self._cache.extend(islice(self._it, index + 1 - cache_len))
-
- return self._cache[index]
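
The caching rules are worth seeing end to end: plain indexing caches items up
to the requested position without consuming them, and a negative index pulls
the entire remaining source into the cache. A sketch, assuming `peekable` is
in scope:

```python
p = peekable(iter(range(10)))
assert p[3] == 3           # caches items 0..3; nothing is consumed
assert next(p) == 0        # consumption resumes from the cached head
assert p[1:4] == [2, 3, 4]
assert p[-1] == 9          # caches the rest of the underlying iterator
```
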
-
-
-def collate(*iterables, **kwargs):
- """Return a sorted merge of the items from each of several already-sorted
- *iterables*.
-
- >>> list(collate('ACDZ', 'AZ', 'JKL'))
- ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
-
- Works lazily, keeping only the next value from each iterable in memory. Use
-    :func:`collate` to, for example, perform an n-way mergesort of items that
- don't fit in memory.
-
- If a *key* function is specified, the iterables will be sorted according
- to its result:
-
- >>> key = lambda s: int(s) # Sort by numeric value, not by string
- >>> list(collate(['1', '10'], ['2', '11'], key=key))
- ['1', '2', '10', '11']
-
-
- If the *iterables* are sorted in descending order, set *reverse* to
- ``True``:
-
- >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
- [5, 4, 3, 2, 1, 0]
-
- If the elements of the passed-in iterables are out of order, you might get
- unexpected results.
-
- On Python 3.5+, this function is an alias for :func:`heapq.merge`.
-
- """
- warnings.warn(
- "collate is no longer part of more_itertools, use heapq.merge",
- DeprecationWarning,
- )
- return merge(*iterables, **kwargs)
-
-
-def consumer(func):
- """Decorator that automatically advances a PEP-342-style "reverse iterator"
- to its first yield point so you don't have to call ``next()`` on it
- manually.
-
- >>> @consumer
- ... def tally():
- ... i = 0
- ... while True:
- ... print('Thing number %s is %s.' % (i, (yield)))
- ... i += 1
- ...
- >>> t = tally()
- >>> t.send('red')
- Thing number 0 is red.
- >>> t.send('fish')
- Thing number 1 is fish.
-
- Without the decorator, you would have to call ``next(t)`` before
- ``t.send()`` could be used.
-
- """
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- gen = func(*args, **kwargs)
- next(gen)
- return gen
-
- return wrapper
-
-
-def ilen(iterable):
- """Return the number of items in *iterable*.
-
- >>> ilen(x for x in range(1000000) if x % 3 == 0)
- 333334
-
- This consumes the iterable, so handle with care.
-
- """
- # This approach was selected because benchmarks showed it's likely the
- # fastest of the known implementations at the time of writing.
- # See GitHub tracker: #236, #230.
- counter = count()
- deque(zip(iterable, counter), maxlen=0)
- return next(counter)
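
The `deque(..., maxlen=0)` idiom drains the zipped iterator at C speed while
`count` keeps score. A more readable but slower equivalent (a sketch, not the
implementation):

```python
def ilen_naive(iterable):
    # pure-Python counting loop; same result, more interpreter overhead
    return sum(1 for _ in iterable)

assert ilen_naive(x for x in range(100) if x % 3 == 0) == 34
```
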
-
-
-def iterate(func, start):
- """Return ``start``, ``func(start)``, ``func(func(start))``, ...
-
- >>> from itertools import islice
- >>> list(islice(iterate(lambda x: 2*x, 1), 10))
- [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
- """
- while True:
- yield start
- start = func(start)
-
-
-def with_iter(context_manager):
- """Wrap an iterable in a ``with`` statement, so it closes once exhausted.
-
- For example, this will close the file when the iterator is exhausted::
-
- upper_lines = (line.upper() for line in with_iter(open('foo')))
-
- Any context manager which returns an iterable is a candidate for
- ``with_iter``.
-
- """
- with context_manager as iterable:
- yield from iterable
-
-
-def one(iterable, too_short=None, too_long=None):
- """Return the first item from *iterable*, which is expected to contain only
- that item. Raise an exception if *iterable* is empty or has more than one
- item.
-
- :func:`one` is useful for ensuring that an iterable contains only one item.
- For example, it can be used to retrieve the result of a database query
- that is expected to return a single row.
-
- If *iterable* is empty, ``ValueError`` will be raised. You may specify a
- different exception with the *too_short* keyword:
-
- >>> it = []
- >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
-    ValueError: too few items in iterable (expected 1)
- >>> too_short = IndexError('too few items')
- >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- IndexError: too few items
-
- Similarly, if *iterable* contains more than one item, ``ValueError`` will
- be raised. You may specify a different exception with the *too_long*
- keyword:
-
- >>> it = ['too', 'many']
- >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ValueError: Expected exactly one item in iterable, but got 'too',
- 'many', and perhaps more.
- >>> too_long = RuntimeError
- >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- RuntimeError
-
- Note that :func:`one` attempts to advance *iterable* twice to ensure there
- is only one item. See :func:`spy` or :func:`peekable` to check iterable
- contents less destructively.
-
- """
- it = iter(iterable)
-
- try:
- first_value = next(it)
- except StopIteration as e:
- raise (
- too_short or ValueError('too few items in iterable (expected 1)')
- ) from e
-
- try:
- second_value = next(it)
- except StopIteration:
- pass
- else:
- msg = (
- 'Expected exactly one item in iterable, but got {!r}, {!r}, '
- 'and perhaps more.'.format(first_value, second_value)
- )
- raise too_long or ValueError(msg)
-
- return first_value
-
-
-def raise_(exception, *args):
- raise exception(*args)
-
-
-def strictly_n(iterable, n, too_short=None, too_long=None):
- """Validate that *iterable* has exactly *n* items and return them if
- it does. If it has fewer than *n* items, call function *too_short*
- with those items. If it has more than *n* items, call function
- *too_long* with the first ``n + 1`` items.
-
- >>> iterable = ['a', 'b', 'c', 'd']
- >>> n = 4
- >>> list(strictly_n(iterable, n))
- ['a', 'b', 'c', 'd']
-
- By default, *too_short* and *too_long* are functions that raise
- ``ValueError``.
-
- >>> list(strictly_n('ab', 3)) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ValueError: too few items in iterable (got 2)
-
- >>> list(strictly_n('abc', 2)) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ValueError: too many items in iterable (got at least 3)
-
- You can instead supply functions that do something else.
- *too_short* will be called with the number of items in *iterable*.
- *too_long* will be called with `n + 1`.
-
- >>> def too_short(item_count):
- ... raise RuntimeError
- >>> it = strictly_n('abcd', 6, too_short=too_short)
- >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- RuntimeError
-
- >>> def too_long(item_count):
- ... print('The boss is going to hear about this')
- >>> it = strictly_n('abcdef', 4, too_long=too_long)
- >>> list(it)
- The boss is going to hear about this
- ['a', 'b', 'c', 'd']
-
- """
- if too_short is None:
- too_short = lambda item_count: raise_(
- ValueError,
-            'too few items in iterable (got {})'.format(item_count),
- )
-
- if too_long is None:
- too_long = lambda item_count: raise_(
- ValueError,
-            'too many items in iterable (got at least {})'.format(item_count),
- )
-
- it = iter(iterable)
- for i in range(n):
- try:
- item = next(it)
- except StopIteration:
- too_short(i)
- return
- else:
- yield item
-
- try:
- next(it)
- except StopIteration:
- pass
- else:
- too_long(n + 1)
-
-
-def distinct_permutations(iterable, r=None):
- """Yield successive distinct permutations of the elements in *iterable*.
-
- >>> sorted(distinct_permutations([1, 0, 1]))
- [(0, 1, 1), (1, 0, 1), (1, 1, 0)]
-
- Equivalent to ``set(permutations(iterable))``, except duplicates are not
- generated and thrown away. For larger input sequences this is much more
- efficient.
-
- Duplicate permutations arise when there are duplicated elements in the
- input iterable. The number of items returned is
- `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
- items input, and each `x_i` is the count of a distinct item in the input
- sequence.
-
- If *r* is given, only the *r*-length permutations are yielded.
-
- >>> sorted(distinct_permutations([1, 0, 1], r=2))
- [(0, 1), (1, 0), (1, 1)]
- >>> sorted(distinct_permutations(range(3), r=2))
- [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
-
- """
- # Algorithm: https://w.wiki/Qai
- def _full(A):
- while True:
- # Yield the permutation we have
- yield tuple(A)
-
- # Find the largest index i such that A[i] < A[i + 1]
- for i in range(size - 2, -1, -1):
- if A[i] < A[i + 1]:
- break
- # If no such index exists, this permutation is the last one
- else:
- return
-
-            # Find the largest index j greater than i such that A[i] < A[j]
- for j in range(size - 1, i, -1):
- if A[i] < A[j]:
- break
-
- # Swap the value of A[i] with that of A[j], then reverse the
- # sequence from A[i + 1] to form the new permutation
- A[i], A[j] = A[j], A[i]
- A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1]
-
- # Algorithm: modified from the above
- def _partial(A, r):
- # Split A into the first r items and the last r items
- head, tail = A[:r], A[r:]
- right_head_indexes = range(r - 1, -1, -1)
- left_tail_indexes = range(len(tail))
-
- while True:
- # Yield the permutation we have
- yield tuple(head)
-
- # Starting from the right, find the first index of the head with
- # value smaller than the maximum value of the tail - call it i.
- pivot = tail[-1]
- for i in right_head_indexes:
- if head[i] < pivot:
- break
- pivot = head[i]
- else:
- return
-
- # Starting from the left, find the first value of the tail
- # with a value greater than head[i] and swap.
- for j in left_tail_indexes:
- if tail[j] > head[i]:
- head[i], tail[j] = tail[j], head[i]
- break
- # If we didn't find one, start from the right and find the first
- # index of the head with a value greater than head[i] and swap.
- else:
- for j in right_head_indexes:
- if head[j] > head[i]:
- head[i], head[j] = head[j], head[i]
- break
-
- # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
- tail += head[: i - r : -1] # head[i + 1:][::-1]
- i += 1
- head[i:], tail[:] = tail[: r - i], tail[r - i :]
-
- items = sorted(iterable)
-
- size = len(items)
- if r is None:
- r = size
-
- if 0 < r <= size:
- return _full(items) if (r == size) else _partial(items, r)
-
- return iter(() if r else ((),))
-
-
-def intersperse(e, iterable, n=1):
- """Intersperse filler element *e* among the items in *iterable*, leaving
- *n* items between each filler element.
-
- >>> list(intersperse('!', [1, 2, 3, 4, 5]))
- [1, '!', 2, '!', 3, '!', 4, '!', 5]
-
- >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
- [1, 2, None, 3, 4, None, 5]
-
- """
- if n == 0:
- raise ValueError('n must be > 0')
- elif n == 1:
- # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2...
- # islice(..., 1, None) -> x_0, e, x_1, e, x_2...
- return islice(interleave(repeat(e), iterable), 1, None)
- else:
- # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
- # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
- # flatten(...) -> x_0, x_1, e, x_2, x_3...
- filler = repeat([e])
- chunks = chunked(iterable, n)
- return flatten(islice(interleave(filler, chunks), 1, None))
-
-
-def unique_to_each(*iterables):
- """Return the elements from each of the input iterables that aren't in the
- other input iterables.
-
- For example, suppose you have a set of packages, each with a set of
- dependencies::
-
- {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
-
- If you remove one package, which dependencies can also be removed?
-
- If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
- associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
- ``pkg_2``, and ``D`` is only needed for ``pkg_3``::
-
- >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
- [['A'], ['C'], ['D']]
-
- If there are duplicates in one input iterable that aren't in the others
- they will be duplicated in the output. Input order is preserved::
-
- >>> unique_to_each("mississippi", "missouri")
- [['p', 'p'], ['o', 'u', 'r']]
-
- It is assumed that the elements of each iterable are hashable.
-
- """
- pool = [list(it) for it in iterables]
- counts = Counter(chain.from_iterable(map(set, pool)))
- uniques = {element for element in counts if counts[element] == 1}
- return [list(filter(uniques.__contains__, it)) for it in pool]
-
-
-def windowed(seq, n, fillvalue=None, step=1):
- """Return a sliding window of width *n* over the given iterable.
-
- >>> all_windows = windowed([1, 2, 3, 4, 5], 3)
- >>> list(all_windows)
- [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
-
- When the window is larger than the iterable, *fillvalue* is used in place
- of missing values:
-
- >>> list(windowed([1, 2, 3], 4))
- [(1, 2, 3, None)]
-
- Each window will advance in increments of *step*:
-
- >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
- [(1, 2, 3), (3, 4, 5), (5, 6, '!')]
-
- To slide into the iterable's items, use :func:`chain` to add filler items
- to the left:
-
- >>> iterable = [1, 2, 3, 4]
- >>> n = 3
- >>> padding = [None] * (n - 1)
- >>> list(windowed(chain(padding, iterable), 3))
- [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
- """
- if n < 0:
- raise ValueError('n must be >= 0')
- if n == 0:
- yield tuple()
- return
- if step < 1:
- raise ValueError('step must be >= 1')
-
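-    # `i` counts down the items remaining until the next window should be
-    # emitted: n items to fill the window initially, then `step` items
-    # between subsequent windows.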
- window = deque(maxlen=n)
- i = n
- for _ in map(window.append, seq):
- i -= 1
- if not i:
- i = step
- yield tuple(window)
-
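-    # The source is exhausted. If the window never filled, emit one window
-    # padded out to length n. If iteration ended partway through a step, pad
-    # with `i` fillvalues and emit one final window (the `i < n` bound
-    # ensures it still contains at least one real item).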
- size = len(window)
- if size < n:
- yield tuple(chain(window, repeat(fillvalue, n - size)))
- elif 0 < i < min(step, n):
- window += (fillvalue,) * i
- yield tuple(window)
-
-
-def substrings(iterable):
- """Yield all of the substrings of *iterable*.
-
- >>> [''.join(s) for s in substrings('more')]
- ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
-
- Note that non-string iterables can also be subdivided.
-
- >>> list(substrings([0, 1, 2]))
- [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
-
- """
- # The length-1 substrings
- seq = []
- for item in iter(iterable):
- seq.append(item)
- yield (item,)
- seq = tuple(seq)
- item_count = len(seq)
-
- # And the rest
- for n in range(2, item_count + 1):
- for i in range(item_count - n + 1):
- yield seq[i : i + n]
-
-
-def substrings_indexes(seq, reverse=False):
- """Yield all substrings and their positions in *seq*
-
- The items yielded will be a tuple of the form ``(substr, i, j)``, where
- ``substr == seq[i:j]``.
-
- This function only works for iterables that support slicing, such as
- ``str`` objects.
-
- >>> for item in substrings_indexes('more'):
- ... print(item)
- ('m', 0, 1)
- ('o', 1, 2)
- ('r', 2, 3)
- ('e', 3, 4)
- ('mo', 0, 2)
- ('or', 1, 3)
- ('re', 2, 4)
- ('mor', 0, 3)
- ('ore', 1, 4)
- ('more', 0, 4)
-
- Set *reverse* to ``True`` to yield the same items in the opposite order.
-
- """
- r = range(1, len(seq) + 1)
- if reverse:
- r = reversed(r)
- return (
- (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
- )
-
-
-class bucket:
- """Wrap *iterable* and return an object that buckets it iterable into
- child iterables based on a *key* function.
-
- >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
- >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character
- >>> sorted(list(s)) # Get the keys
- ['a', 'b', 'c']
- >>> a_iterable = s['a']
- >>> next(a_iterable)
- 'a1'
- >>> next(a_iterable)
- 'a2'
- >>> list(s['b'])
- ['b1', 'b2', 'b3']
-
- The original iterable will be advanced and its items will be cached until
- they are used by the child iterables. This may require significant storage.
-
- By default, attempting to select a bucket to which no items belong will
- exhaust the iterable and cache all values.
- If you specify a *validator* function, selected buckets will instead be
- checked against it.
-
- >>> from itertools import count
- >>> it = count(1, 2) # Infinite sequence of odd numbers
- >>> key = lambda x: x % 10 # Bucket by last digit
- >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
- >>> s = bucket(it, key=key, validator=validator)
- >>> 2 in s
- False
- >>> list(s[2])
- []
-
- """
-
- def __init__(self, iterable, key, validator=None):
- self._it = iter(iterable)
- self._key = key
- self._cache = defaultdict(deque)
- self._validator = validator or (lambda x: True)
-
- def __contains__(self, value):
- if not self._validator(value):
- return False
-
- try:
- item = next(self[value])
- except StopIteration:
- return False
- else:
- self._cache[value].appendleft(item)
-
- return True
-
- def _get_values(self, value):
- """
- Helper to yield items from the parent iterator that match *value*.
- Items that don't match are stored in the local cache as they
- are encountered.
- """
- while True:
- # If we've cached some items that match the target value, emit
- # the first one and evict it from the cache.
- if self._cache[value]:
- yield self._cache[value].popleft()
- # Otherwise we need to advance the parent iterator to search for
- # a matching item, caching the rest.
- else:
- while True:
- try:
- item = next(self._it)
- except StopIteration:
- return
- item_value = self._key(item)
- if item_value == value:
- yield item
- break
- elif self._validator(item_value):
- self._cache[item_value].append(item)
-
- def __iter__(self):
- for item in self._it:
- item_value = self._key(item)
- if self._validator(item_value):
- self._cache[item_value].append(item)
-
- yield from self._cache.keys()
-
- def __getitem__(self, value):
- if not self._validator(value):
- return iter(())
-
- return self._get_values(value)
-
-
-def spy(iterable, n=1):
- """Return a 2-tuple with a list containing the first *n* elements of
- *iterable*, and an iterator with the same items as *iterable*.
- This allows you to "look ahead" at the items in the iterable without
- advancing it.
-
- There is one item in the list by default:
-
- >>> iterable = 'abcdefg'
- >>> head, iterable = spy(iterable)
- >>> head
- ['a']
- >>> list(iterable)
- ['a', 'b', 'c', 'd', 'e', 'f', 'g']
-
- You may use unpacking to retrieve items instead of lists:
-
- >>> (head,), iterable = spy('abcdefg')
- >>> head
- 'a'
- >>> (first, second), iterable = spy('abcdefg', 2)
- >>> first
- 'a'
- >>> second
- 'b'
-
- The number of items requested can be larger than the number of items in
- the iterable:
-
- >>> iterable = [1, 2, 3, 4, 5]
- >>> head, iterable = spy(iterable, 10)
- >>> head
- [1, 2, 3, 4, 5]
- >>> list(iterable)
- [1, 2, 3, 4, 5]
-
- """
- it = iter(iterable)
- head = take(n, it)
-
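-    # Copy `head` so that mutating the returned list cannot disturb the
-    # items that `chain` will replay from it.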
- return head.copy(), chain(head, it)
-
-
-def interleave(*iterables):
- """Return a new iterable yielding from each iterable in turn,
- until the shortest is exhausted.
-
- >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
- [1, 4, 6, 2, 5, 7]
-
- For a version that doesn't terminate after the shortest iterable is
- exhausted, see :func:`interleave_longest`.
-
- """
- return chain.from_iterable(zip(*iterables))
-
-
-def interleave_longest(*iterables):
- """Return a new iterable yielding from each iterable in turn,
- skipping any that are exhausted.
-
- >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
- [1, 4, 6, 2, 5, 7, 3, 8]
-
- This function produces the same output as :func:`roundrobin`, but may
- perform better for some inputs (in particular when the number of iterables
- is large).
-
- """
- i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
- return (x for x in i if x is not _marker)
-
-
-def interleave_evenly(iterables, lengths=None):
- """
- Interleave multiple iterables so that their elements are evenly distributed
- throughout the output sequence.
-
- >>> iterables = [1, 2, 3, 4, 5], ['a', 'b']
- >>> list(interleave_evenly(iterables))
- [1, 2, 'a', 3, 4, 'b', 5]
-
- >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]]
- >>> list(interleave_evenly(iterables))
- [1, 6, 4, 2, 7, 3, 8, 5]
-
- This function requires iterables of known length. Iterables without
- ``__len__()`` can be used by manually specifying lengths with *lengths*:
-
- >>> from itertools import combinations, repeat
- >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']]
- >>> lengths = [4 * (4 - 1) // 2, 3]
- >>> list(interleave_evenly(iterables, lengths=lengths))
- [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c']
-
- Based on Bresenham's algorithm.
- """
- if lengths is None:
- try:
- lengths = [len(it) for it in iterables]
- except TypeError:
- raise ValueError(
- 'Iterable lengths could not be determined automatically. '
- 'Specify them with the lengths keyword.'
- )
- elif len(iterables) != len(lengths):
- raise ValueError('Mismatching number of iterables and lengths.')
-
- dims = len(lengths)
-
- # sort iterables by length, descending
- lengths_permute = sorted(
- range(dims), key=lambda i: lengths[i], reverse=True
- )
- lengths_desc = [lengths[i] for i in lengths_permute]
- iters_desc = [iter(iterables[i]) for i in lengths_permute]
-
- # the longest iterable is the primary one (Bresenham: the longest
- # distance along an axis)
- delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:]
- iter_primary, iters_secondary = iters_desc[0], iters_desc[1:]
- errors = [delta_primary // dims] * len(deltas_secondary)
-
- to_yield = sum(lengths)
- while to_yield:
- yield next(iter_primary)
- to_yield -= 1
- # update errors for each secondary iterable
- errors = [e - delta for e, delta in zip(errors, deltas_secondary)]
-
- # those iterables for which the error is negative are yielded
- # ("diagonal step" in Bresenham)
- for i, e in enumerate(errors):
- if e < 0:
- yield next(iters_secondary[i])
- to_yield -= 1
- errors[i] += delta_primary
-
-
-def collapse(iterable, base_type=None, levels=None):
- """Flatten an iterable with multiple levels of nesting (e.g., a list of
- lists of tuples) into non-iterable types.
-
- >>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
- >>> list(collapse(iterable))
- [1, 2, 3, 4, 5, 6]
-
- Binary and text strings are not considered iterable and
- will not be collapsed.
-
- To avoid collapsing other types, specify *base_type*:
-
- >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
- >>> list(collapse(iterable, base_type=tuple))
- ['ab', ('cd', 'ef'), 'gh', 'ij']
-
- Specify *levels* to stop flattening after a certain level:
-
- >>> iterable = [('a', ['b']), ('c', ['d'])]
- >>> list(collapse(iterable)) # Fully flattened
- ['a', 'b', 'c', 'd']
- >>> list(collapse(iterable, levels=1)) # Only one level flattened
- ['a', ['b'], 'c', ['d']]
-
- """
-
- def walk(node, level):
- if (
- ((levels is not None) and (level > levels))
- or isinstance(node, (str, bytes))
- or ((base_type is not None) and isinstance(node, base_type))
- ):
- yield node
- return
-
- try:
- tree = iter(node)
- except TypeError:
- yield node
- return
- else:
- for child in tree:
- yield from walk(child, level + 1)
-
- yield from walk(iterable, 0)
-
-
-def side_effect(func, iterable, chunk_size=None, before=None, after=None):
- """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
- of items) before yielding the item.
-
- `func` must be a function that takes a single argument. Its return value
- will be discarded.
-
- *before* and *after* are optional functions that take no arguments. They
- will be executed before iteration starts and after it ends, respectively.
-
- `side_effect` can be used for logging, updating progress bars, or anything
- that is not functionally "pure."
-
- Emitting a status message:
-
- >>> from more_itertools import consume
- >>> func = lambda item: print('Received {}'.format(item))
- >>> consume(side_effect(func, range(2)))
- Received 0
- Received 1
-
- Operating on chunks of items:
-
- >>> pair_sums = []
- >>> func = lambda chunk: pair_sums.append(sum(chunk))
- >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
- [0, 1, 2, 3, 4, 5]
- >>> list(pair_sums)
- [1, 5, 9]
-
- Writing to a file-like object:
-
- >>> from io import StringIO
- >>> from more_itertools import consume
- >>> f = StringIO()
- >>> func = lambda x: print(x, file=f)
- >>> before = lambda: print(u'HEADER', file=f)
- >>> after = f.close
- >>> it = [u'a', u'b', u'c']
- >>> consume(side_effect(func, it, before=before, after=after))
- >>> f.closed
- True
-
- """
- try:
- if before is not None:
- before()
-
- if chunk_size is None:
- for item in iterable:
- func(item)
- yield item
- else:
- for chunk in chunked(iterable, chunk_size):
- func(chunk)
- yield from chunk
- finally:
- if after is not None:
- after()
-
-
-def sliced(seq, n, strict=False):
- """Yield slices of length *n* from the sequence *seq*.
-
- >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
- [(1, 2, 3), (4, 5, 6)]
-
-    By default, the last yielded slice will have fewer than *n* elements
- if the length of *seq* is not divisible by *n*:
-
- >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
- [(1, 2, 3), (4, 5, 6), (7, 8)]
-
- If the length of *seq* is not divisible by *n* and *strict* is
- ``True``, then ``ValueError`` will be raised before the last
- slice is yielded.
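-
-    For example:
-
-    >>> list(sliced((1, 2, 3, 4), 3, strict=True)) # doctest: +IGNORE_EXCEPTION_DETAIL
-    Traceback (most recent call last):
-    ...
-    ValueError: seq is not divisible by n.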
-
- This function will only work for iterables that support slicing.
- For non-sliceable iterables, see :func:`chunked`.
-
- """
- iterator = takewhile(len, (seq[i : i + n] for i in count(0, n)))
- if strict:
-
- def ret():
- for _slice in iterator:
- if len(_slice) != n:
- raise ValueError("seq is not divisible by n.")
- yield _slice
-
- return iter(ret())
- else:
- return iterator
-
-
-def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
- """Yield lists of items from *iterable*, where each list is delimited by
- an item where callable *pred* returns ``True``.
-
- >>> list(split_at('abcdcba', lambda x: x == 'b'))
- [['a'], ['c', 'd', 'c'], ['a']]
-
- >>> list(split_at(range(10), lambda n: n % 2 == 1))
- [[0], [2], [4], [6], [8], []]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
- [[0], [2], [4, 5, 6, 7, 8, 9]]
-
- By default, the delimiting items are not included in the output.
-    To include them, set *keep_separator* to ``True``.
-
- >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
- [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
-
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- buf = []
- it = iter(iterable)
- for item in it:
- if pred(item):
- yield buf
- if keep_separator:
- yield [item]
- if maxsplit == 1:
- yield list(it)
- return
- buf = []
- maxsplit -= 1
- else:
- buf.append(item)
- yield buf
-
-
-def split_before(iterable, pred, maxsplit=-1):
- """Yield lists of items from *iterable*, where each list ends just before
- an item for which callable *pred* returns ``True``:
-
- >>> list(split_before('OneTwo', lambda s: s.isupper()))
- [['O', 'n', 'e'], ['T', 'w', 'o']]
-
- >>> list(split_before(range(10), lambda n: n % 3 == 0))
- [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
- [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- buf = []
- it = iter(iterable)
- for item in it:
- if pred(item) and buf:
- yield buf
- if maxsplit == 1:
- yield [item] + list(it)
- return
- buf = []
- maxsplit -= 1
- buf.append(item)
- if buf:
- yield buf
-
-
-def split_after(iterable, pred, maxsplit=-1):
- """Yield lists of items from *iterable*, where each list ends with an
- item where callable *pred* returns ``True``:
-
- >>> list(split_after('one1two2', lambda s: s.isdigit()))
- [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
-
- >>> list(split_after(range(10), lambda n: n % 3 == 0))
- [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
- [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
-
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- buf = []
- it = iter(iterable)
- for item in it:
- buf.append(item)
- if pred(item) and buf:
- yield buf
- if maxsplit == 1:
- yield list(it)
- return
- buf = []
- maxsplit -= 1
- if buf:
- yield buf
-
-
-def split_when(iterable, pred, maxsplit=-1):
- """Split *iterable* into pieces based on the output of *pred*.
- *pred* should be a function that takes successive pairs of items and
- returns ``True`` if the iterable should be split in between them.
-
- For example, to find runs of increasing numbers, split the iterable when
- element ``i`` is larger than element ``i + 1``:
-
- >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
- [[1, 2, 3, 3], [2, 5], [2, 4], [2]]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
- ... lambda x, y: x > y, maxsplit=2))
- [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
-
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- it = iter(iterable)
- try:
- cur_item = next(it)
- except StopIteration:
- return
-
- buf = [cur_item]
- for next_item in it:
- if pred(cur_item, next_item):
- yield buf
- if maxsplit == 1:
- yield [next_item] + list(it)
- return
- buf = []
- maxsplit -= 1
-
- buf.append(next_item)
- cur_item = next_item
-
- yield buf
-
-
-def split_into(iterable, sizes):
- """Yield a list of sequential items from *iterable* of length 'n' for each
- integer 'n' in *sizes*.
-
- >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
- [[1], [2, 3], [4, 5, 6]]
-
- If the sum of *sizes* is smaller than the length of *iterable*, then the
- remaining items of *iterable* will not be returned.
-
- >>> list(split_into([1,2,3,4,5,6], [2,3]))
- [[1, 2], [3, 4, 5]]
-
- If the sum of *sizes* is larger than the length of *iterable*, fewer items
- will be returned in the iteration that overruns *iterable* and further
- lists will be empty:
-
- >>> list(split_into([1,2,3,4], [1,2,3,4]))
- [[1], [2, 3], [4], []]
-
-    When a ``None`` object is encountered in *sizes*, the returned list will
-    contain items up to the end of *iterable*, the same way that
-    :func:`itertools.islice` does:
-
- >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
- [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
-
- :func:`split_into` can be useful for grouping a series of items where the
-    sizes of the groups are not uniform. An example would be a row from a
-    table in which multiple columns represent elements of the same feature
-    (e.g., a point represented by x, y, z) but the format is not the same
-    for all columns.
- """
- # convert the iterable argument into an iterator so its contents can
- # be consumed by islice in case it is a generator
- it = iter(iterable)
-
- for size in sizes:
- if size is None:
- yield list(it)
- return
- else:
- yield list(islice(it, size))
-
-
-def padded(iterable, fillvalue=None, n=None, next_multiple=False):
- """Yield the elements from *iterable*, followed by *fillvalue*, such that
- at least *n* items are emitted.
-
- >>> list(padded([1, 2, 3], '?', 5))
- [1, 2, 3, '?', '?']
-
- If *next_multiple* is ``True``, *fillvalue* will be emitted until the
- number of items emitted is a multiple of *n*::
-
- >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
- [1, 2, 3, 4, None, None]
-
- If *n* is ``None``, *fillvalue* will be emitted indefinitely.
-
- """
- it = iter(iterable)
- if n is None:
- yield from chain(it, repeat(fillvalue))
- elif n < 1:
- raise ValueError('n must be at least 1')
- else:
- item_count = 0
- for item in it:
- yield item
- item_count += 1
-
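-        # Pad out to the next multiple of n, or to n items total; the modulo
-        # keeps `remaining` at zero when item_count is already a multiple.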
- remaining = (n - item_count) % n if next_multiple else n - item_count
- for _ in range(remaining):
- yield fillvalue
-
-
-def repeat_each(iterable, n=2):
- """Repeat each element in *iterable* *n* times.
-
- >>> list(repeat_each('ABC', 3))
- ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
- """
- return chain.from_iterable(map(repeat, iterable, repeat(n)))
-
-
-def repeat_last(iterable, default=None):
- """After the *iterable* is exhausted, keep yielding its last element.
-
- >>> list(islice(repeat_last(range(3)), 5))
- [0, 1, 2, 2, 2]
-
- If the iterable is empty, yield *default* forever::
-
- >>> list(islice(repeat_last(range(0), 42), 5))
- [42, 42, 42, 42, 42]
-
- """
- item = _marker
- for item in iterable:
- yield item
- final = default if item is _marker else item
- yield from repeat(final)
-
-
-def distribute(n, iterable):
- """Distribute the items from *iterable* among *n* smaller iterables.
-
- >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
- >>> list(group_1)
- [1, 3, 5]
- >>> list(group_2)
- [2, 4, 6]
-
- If the length of *iterable* is not evenly divisible by *n*, then the
- length of the returned iterables will not be identical:
-
- >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
- >>> [list(c) for c in children]
- [[1, 4, 7], [2, 5], [3, 6]]
-
- If the length of *iterable* is smaller than *n*, then the last returned
- iterables will be empty:
-
- >>> children = distribute(5, [1, 2, 3])
- >>> [list(c) for c in children]
- [[1], [2], [3], [], []]
-
- This function uses :func:`itertools.tee` and may require significant
-    storage. If you need the order of items in the smaller iterables to match the
- original iterable, see :func:`divide`.
-
- """
- if n < 1:
- raise ValueError('n must be at least 1')
-
- children = tee(iterable, n)
- return [islice(it, index, None, n) for index, it in enumerate(children)]
-
-
-def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
- """Yield tuples whose elements are offset from *iterable*.
- The amount by which the `i`-th item in each tuple is offset is given by
- the `i`-th item in *offsets*.
-
- >>> list(stagger([0, 1, 2, 3]))
- [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
- >>> list(stagger(range(8), offsets=(0, 2, 4)))
- [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
-
- By default, the sequence will end when the final element of a tuple is the
- last item in the iterable. To continue until the first element of a tuple
- is the last item in the iterable, set *longest* to ``True``::
-
- >>> list(stagger([0, 1, 2, 3], longest=True))
- [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
-
- By default, ``None`` will be used to replace offsets beyond the end of the
- sequence. Specify *fillvalue* to use some other value.
-
- """
- children = tee(iterable, len(offsets))
-
- return zip_offset(
- *children, offsets=offsets, longest=longest, fillvalue=fillvalue
- )
-
-
-class UnequalIterablesError(ValueError):
- def __init__(self, details=None):
- msg = 'Iterables have different lengths'
- if details is not None:
- msg += (': index 0 has length {}; index {} has length {}').format(
- *details
- )
-
- super().__init__(msg)
-
-
-def _zip_equal_generator(iterables):
- for combo in zip_longest(*iterables, fillvalue=_marker):
- for val in combo:
- if val is _marker:
- raise UnequalIterablesError()
- yield combo
-
-
-def _zip_equal(*iterables):
- # Check whether the iterables are all the same size.
- try:
- first_size = len(iterables[0])
- for i, it in enumerate(iterables[1:], 1):
- size = len(it)
- if size != first_size:
- break
- else:
- # If we didn't break out, we can use the built-in zip.
- return zip(*iterables)
-
- # If we did break out, there was a mismatch.
- raise UnequalIterablesError(details=(first_size, i, size))
- # If any one of the iterables didn't have a length, start reading
- # them until one runs out.
- except TypeError:
- return _zip_equal_generator(iterables)
-
-
-def zip_equal(*iterables):
- """``zip`` the input *iterables* together, but raise
- ``UnequalIterablesError`` if they aren't all the same length.
-
- >>> it_1 = range(3)
- >>> it_2 = iter('abc')
- >>> list(zip_equal(it_1, it_2))
- [(0, 'a'), (1, 'b'), (2, 'c')]
-
- >>> it_1 = range(3)
- >>> it_2 = iter('abcd')
- >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- more_itertools.more.UnequalIterablesError: Iterables have different
- lengths
-
- """
- if hexversion >= 0x30A00A6:
- warnings.warn(
- (
- 'zip_equal will be removed in a future version of '
- 'more-itertools. Use the builtin zip function with '
- 'strict=True instead.'
- ),
- DeprecationWarning,
- )
-
- return _zip_equal(*iterables)
-
-
-def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
- """``zip`` the input *iterables* together, but offset the `i`-th iterable
- by the `i`-th item in *offsets*.
-
- >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
- [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
-
- This can be used as a lightweight alternative to SciPy or pandas to analyze
- data sets in which some series have a lead or lag relationship.
-
- By default, the sequence will end when the shortest iterable is exhausted.
- To continue until the longest iterable is exhausted, set *longest* to
- ``True``.
-
- >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
- [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
-
- By default, ``None`` will be used to replace offsets beyond the end of the
- sequence. Specify *fillvalue* to use some other value.
-
- """
- if len(iterables) != len(offsets):
- raise ValueError("Number of iterables and offsets didn't match")
-
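-    # A negative offset prepends that many fillvalues; a positive offset
-    # skips that many leading items.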
- staggered = []
- for it, n in zip(iterables, offsets):
- if n < 0:
- staggered.append(chain(repeat(fillvalue, -n), it))
- elif n > 0:
- staggered.append(islice(it, n, None))
- else:
- staggered.append(it)
-
- if longest:
- return zip_longest(*staggered, fillvalue=fillvalue)
-
- return zip(*staggered)
-
-
-def sort_together(iterables, key_list=(0,), key=None, reverse=False):
- """Return the input iterables sorted together, with *key_list* as the
- priority for sorting. All iterables are trimmed to the length of the
- shortest one.
-
- This can be used like the sorting function in a spreadsheet. If each
- iterable represents a column of data, the key list determines which
- columns are used for sorting.
-
- By default, all iterables are sorted using the ``0``-th iterable::
-
- >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
- >>> sort_together(iterables)
- [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
-
- Set a different key list to sort according to another iterable.
- Specifying multiple keys dictates how ties are broken::
-
- >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
- >>> sort_together(iterables, key_list=(1, 2))
- [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
-
- To sort by a function of the elements of the iterable, pass a *key*
- function. Its arguments are the elements of the iterables corresponding to
- the key list::
-
- >>> names = ('a', 'b', 'c')
- >>> lengths = (1, 2, 3)
- >>> widths = (5, 2, 1)
- >>> def area(length, width):
- ... return length * width
- >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area)
- [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)]
-
- Set *reverse* to ``True`` to sort in descending order.
-
- >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
- [(3, 2, 1), ('a', 'b', 'c')]
-
- """
- if key is None:
- # if there is no key function, the key argument to sorted is an
- # itemgetter
- key_argument = itemgetter(*key_list)
- else:
-        # if there is a key function, call it with the items at the offsets
-        # specified by the key list as arguments
- key_list = list(key_list)
- if len(key_list) == 1:
- # if key_list contains a single item, pass the item at that offset
- # as the only argument to the key function
- key_offset = key_list[0]
- key_argument = lambda zipped_items: key(zipped_items[key_offset])
- else:
- # if key_list contains multiple items, use itemgetter to return a
- # tuple of items, which we pass as *args to the key function
- get_key_items = itemgetter(*key_list)
- key_argument = lambda zipped_items: key(
- *get_key_items(zipped_items)
- )
-
- return list(
- zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse))
- )
-
-
-def unzip(iterable):
- """The inverse of :func:`zip`, this function disaggregates the elements
- of the zipped *iterable*.
-
- The ``i``-th iterable contains the ``i``-th element from each element
-    of the zipped iterable. The first element is used to determine the
- length of the remaining elements.
-
- >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
- >>> letters, numbers = unzip(iterable)
- >>> list(letters)
- ['a', 'b', 'c', 'd']
- >>> list(numbers)
- [1, 2, 3, 4]
-
- This is similar to using ``zip(*iterable)``, but it avoids reading
- *iterable* into memory. Note, however, that this function uses
- :func:`itertools.tee` and thus may require significant storage.
-
- """
- head, iterable = spy(iter(iterable))
- if not head:
- # empty iterable, e.g. zip([], [], [])
- return ()
- # spy returns a one-length iterable as head
- head = head[0]
- iterables = tee(iterable, len(head))
-
- def itemgetter(i):
- def getter(obj):
- try:
- return obj[i]
- except IndexError:
- # basically if we have an iterable like
- # iter([(1, 2, 3), (4, 5), (6,)])
- # the second unzipped iterable would fail at the third tuple
- # since it would try to access tup[1]
- # same with the third unzipped iterable and the second tuple
- # to support these "improperly zipped" iterables,
- # we create a custom itemgetter
- # which just stops the unzipped iterables
- # at first length mismatch
- raise StopIteration
-
- return getter
-
- return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
-
-
-def divide(n, iterable):
- """Divide the elements from *iterable* into *n* parts, maintaining
- order.
-
- >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
- >>> list(group_1)
- [1, 2, 3]
- >>> list(group_2)
- [4, 5, 6]
-
- If the length of *iterable* is not evenly divisible by *n*, then the
- length of the returned iterables will not be identical:
-
- >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
- >>> [list(c) for c in children]
- [[1, 2, 3], [4, 5], [6, 7]]
-
-    If the length of the iterable is smaller than *n*, then the last returned
- iterables will be empty:
-
- >>> children = divide(5, [1, 2, 3])
- >>> [list(c) for c in children]
- [[1], [2], [3], [], []]
-
- This function will exhaust the iterable before returning and may require
- significant storage. If order is not important, see :func:`distribute`,
- which does not first pull the iterable into memory.
-
- """
- if n < 1:
- raise ValueError('n must be at least 1')
-
- try:
- iterable[:0]
- except TypeError:
- seq = tuple(iterable)
- else:
- seq = iterable
-
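-    # Distribute the remainder: the first r parts get q + 1 items each and
-    # the rest get q, so part lengths differ by at most one.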
- q, r = divmod(len(seq), n)
-
- ret = []
- stop = 0
- for i in range(1, n + 1):
- start = stop
- stop += q + 1 if i <= r else q
- ret.append(iter(seq[start:stop]))
-
- return ret
-
-
-def always_iterable(obj, base_type=(str, bytes)):
- """If *obj* is iterable, return an iterator over its items::
-
- >>> obj = (1, 2, 3)
- >>> list(always_iterable(obj))
- [1, 2, 3]
-
- If *obj* is not iterable, return a one-item iterable containing *obj*::
-
- >>> obj = 1
- >>> list(always_iterable(obj))
- [1]
-
- If *obj* is ``None``, return an empty iterable:
-
- >>> obj = None
-        >>> list(always_iterable(obj))
- []
-
- By default, binary and text strings are not considered iterable::
-
- >>> obj = 'foo'
- >>> list(always_iterable(obj))
- ['foo']
-
- If *base_type* is set, objects for which ``isinstance(obj, base_type)``
- returns ``True`` won't be considered iterable.
-
- >>> obj = {'a': 1}
- >>> list(always_iterable(obj)) # Iterate over the dict's keys
- ['a']
- >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
- [{'a': 1}]
-
- Set *base_type* to ``None`` to avoid any special handling and treat objects
- Python considers iterable as iterable:
-
- >>> obj = 'foo'
- >>> list(always_iterable(obj, base_type=None))
- ['f', 'o', 'o']
- """
- if obj is None:
- return iter(())
-
- if (base_type is not None) and isinstance(obj, base_type):
- return iter((obj,))
-
- try:
- return iter(obj)
- except TypeError:
- return iter((obj,))
-
-
-def adjacent(predicate, iterable, distance=1):
- """Return an iterable over `(bool, item)` tuples where the `item` is
- drawn from *iterable* and the `bool` indicates whether
- that item satisfies the *predicate* or is adjacent to an item that does.
-
- For example, to find whether items are adjacent to a ``3``::
-
- >>> list(adjacent(lambda x: x == 3, range(6)))
- [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
-
- Set *distance* to change what counts as adjacent. For example, to find
- whether items are two places away from a ``3``:
-
- >>> list(adjacent(lambda x: x == 3, range(6), distance=2))
- [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
-
- This is useful for contextualizing the results of a search function.
- For example, a code comparison tool might want to identify lines that
- have changed, but also surrounding lines to give the viewer of the diff
- context.
-
- The predicate function will only be called once for each item in the
- iterable.
-
- See also :func:`groupby_transform`, which can be used with this function
- to group ranges of items with the same `bool` value.
-
- """
- # Allow distance=0 mainly for testing that it reproduces results with map()
- if distance < 0:
- raise ValueError('distance must be at least 0')
-
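-    # Pad the predicate results with `distance` False values on each side,
-    # then slide a window of width 2 * distance + 1 across them: any() is
-    # True whenever some item within `distance` places satisfied the
-    # predicate.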
- i1, i2 = tee(iterable)
- padding = [False] * distance
- selected = chain(padding, map(predicate, i1), padding)
- adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
- return zip(adjacent_to_selected, i2)
-
-
-def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
- """An extension of :func:`itertools.groupby` that can apply transformations
- to the grouped data.
-
- * *keyfunc* is a function computing a key value for each item in *iterable*
- * *valuefunc* is a function that transforms the individual items from
- *iterable* after grouping
- * *reducefunc* is a function that transforms each group of items
-
- >>> iterable = 'aAAbBBcCC'
- >>> keyfunc = lambda k: k.upper()
- >>> valuefunc = lambda v: v.lower()
- >>> reducefunc = lambda g: ''.join(g)
- >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
- [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
-
- Each optional argument defaults to an identity function if not specified.
-
- :func:`groupby_transform` is useful when grouping elements of an iterable
- using a separate iterable as the key. To do this, :func:`zip` the iterables
- and pass a *keyfunc* that extracts the first element and a *valuefunc*
- that extracts the second element::
-
- >>> from operator import itemgetter
- >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
- >>> values = 'abcdefghi'
- >>> iterable = zip(keys, values)
- >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
- >>> [(k, ''.join(g)) for k, g in grouper]
- [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
-
- Note that the order of items in the iterable is significant.
- Only adjacent items are grouped together, so if you don't want any
- duplicate groups, you should sort the iterable by the key function.
-
- """
- ret = groupby(iterable, keyfunc)
- if valuefunc:
- ret = ((k, map(valuefunc, g)) for k, g in ret)
- if reducefunc:
- ret = ((k, reducefunc(g)) for k, g in ret)
-
- return ret
-
-
-class numeric_range(abc.Sequence, abc.Hashable):
- """An extension of the built-in ``range()`` function whose arguments can
- be any orderable numeric type.
-
- With only *stop* specified, *start* defaults to ``0`` and *step*
- defaults to ``1``. The output items will match the type of *stop*:
-
- >>> list(numeric_range(3.5))
- [0.0, 1.0, 2.0, 3.0]
-
- With only *start* and *stop* specified, *step* defaults to ``1``. The
- output items will match the type of *start*:
-
- >>> from decimal import Decimal
- >>> start = Decimal('2.1')
- >>> stop = Decimal('5.1')
- >>> list(numeric_range(start, stop))
- [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
-
- With *start*, *stop*, and *step* specified the output items will match
- the type of ``start + step``:
-
- >>> from fractions import Fraction
- >>> start = Fraction(1, 2) # Start at 1/2
- >>> stop = Fraction(5, 2) # End at 5/2
- >>> step = Fraction(1, 2) # Count by 1/2
- >>> list(numeric_range(start, stop, step))
- [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
-
- If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
-
- >>> list(numeric_range(3, -1, -1.0))
- [3.0, 2.0, 1.0, 0.0]
-
- Be aware of the limitations of floating point numbers; the representation
- of the yielded numbers may be surprising.
-
- ``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
- is a ``datetime.timedelta`` object:
-
- >>> import datetime
- >>> start = datetime.datetime(2019, 1, 1)
- >>> stop = datetime.datetime(2019, 1, 3)
- >>> step = datetime.timedelta(days=1)
- >>> items = iter(numeric_range(start, stop, step))
- >>> next(items)
- datetime.datetime(2019, 1, 1, 0, 0)
- >>> next(items)
- datetime.datetime(2019, 1, 2, 0, 0)
-
- """
-
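-    # All empty ranges compare equal, so they must share a single hash
-    # value; borrow the hash of an empty built-in range.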
- _EMPTY_HASH = hash(range(0, 0))
-
- def __init__(self, *args):
- argc = len(args)
- if argc == 1:
- (self._stop,) = args
- self._start = type(self._stop)(0)
- self._step = type(self._stop - self._start)(1)
- elif argc == 2:
- self._start, self._stop = args
- self._step = type(self._stop - self._start)(1)
- elif argc == 3:
- self._start, self._stop, self._step = args
- elif argc == 0:
- raise TypeError(
- 'numeric_range expected at least '
- '1 argument, got {}'.format(argc)
- )
- else:
- raise TypeError(
- 'numeric_range expected at most '
- '3 arguments, got {}'.format(argc)
- )
-
- self._zero = type(self._step)(0)
- if self._step == self._zero:
- raise ValueError('numeric_range() arg 3 must not be zero')
- self._growing = self._step > self._zero
- self._init_len()
-
- def __bool__(self):
- if self._growing:
- return self._start < self._stop
- else:
- return self._start > self._stop
-
- def __contains__(self, elem):
- if self._growing:
- if self._start <= elem < self._stop:
- return (elem - self._start) % self._step == self._zero
- else:
- if self._start >= elem > self._stop:
- return (self._start - elem) % (-self._step) == self._zero
-
- return False
-
- def __eq__(self, other):
- if isinstance(other, numeric_range):
- empty_self = not bool(self)
- empty_other = not bool(other)
- if empty_self or empty_other:
- return empty_self and empty_other # True if both empty
- else:
- return (
- self._start == other._start
- and self._step == other._step
- and self._get_by_index(-1) == other._get_by_index(-1)
- )
- else:
- return False
-
- def __getitem__(self, key):
- if isinstance(key, int):
- return self._get_by_index(key)
- elif isinstance(key, slice):
- step = self._step if key.step is None else key.step * self._step
-
- if key.start is None or key.start <= -self._len:
- start = self._start
- elif key.start >= self._len:
- start = self._stop
- else: # -self._len < key.start < self._len
- start = self._get_by_index(key.start)
-
- if key.stop is None or key.stop >= self._len:
- stop = self._stop
- elif key.stop <= -self._len:
- stop = self._start
- else: # -self._len < key.stop < self._len
- stop = self._get_by_index(key.stop)
-
- return numeric_range(start, stop, step)
- else:
- raise TypeError(
- 'numeric range indices must be '
- 'integers or slices, not {}'.format(type(key).__name__)
- )
-
- def __hash__(self):
- if self:
- return hash((self._start, self._get_by_index(-1), self._step))
- else:
- return self._EMPTY_HASH
-
- def __iter__(self):
- values = (self._start + (n * self._step) for n in count())
- if self._growing:
- return takewhile(partial(gt, self._stop), values)
- else:
- return takewhile(partial(lt, self._stop), values)
-
- def __len__(self):
- return self._len
-
- def _init_len(self):
- if self._growing:
- start = self._start
- stop = self._stop
- step = self._step
- else:
- start = self._stop
- stop = self._start
- step = -self._step
- distance = stop - start
- if distance <= self._zero:
- self._len = 0
-        else:  # distance > 0 and step > 0: regular Euclidean division
- q, r = divmod(distance, step)
- self._len = int(q) + int(r != self._zero)
-
- def __reduce__(self):
- return numeric_range, (self._start, self._stop, self._step)
-
- def __repr__(self):
- if self._step == 1:
- return "numeric_range({}, {})".format(
- repr(self._start), repr(self._stop)
- )
- else:
- return "numeric_range({}, {}, {})".format(
- repr(self._start), repr(self._stop), repr(self._step)
- )
-
- def __reversed__(self):
- return iter(
- numeric_range(
- self._get_by_index(-1), self._start - self._step, -self._step
- )
- )
-
- def count(self, value):
- return int(value in self)
-
- def index(self, value):
- if self._growing:
- if self._start <= value < self._stop:
- q, r = divmod(value - self._start, self._step)
- if r == self._zero:
- return int(q)
- else:
- if self._start >= value > self._stop:
- q, r = divmod(self._start - value, -self._step)
- if r == self._zero:
- return int(q)
-
- raise ValueError("{} is not in numeric range".format(value))
-
- def _get_by_index(self, i):
- if i < 0:
- i += self._len
- if i < 0 or i >= self._len:
- raise IndexError("numeric range object index out of range")
- return self._start + i * self._step
-
-
-def count_cycle(iterable, n=None):
- """Cycle through the items from *iterable* up to *n* times, yielding
- the number of completed cycles along with each item. If *n* is omitted the
- process repeats indefinitely.
-
- >>> list(count_cycle('AB', 3))
- [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
-
- """
- iterable = tuple(iterable)
- if not iterable:
- return iter(())
- counter = count() if n is None else range(n)
- return ((i, item) for i in counter for item in iterable)
-
-
-def mark_ends(iterable):
- """Yield 3-tuples of the form ``(is_first, is_last, item)``.
-
- >>> list(mark_ends('ABC'))
- [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')]
-
- Use this when looping over an iterable to take special action on its first
- and/or last items:
-
- >>> iterable = ['Header', 100, 200, 'Footer']
- >>> total = 0
- >>> for is_first, is_last, item in mark_ends(iterable):
- ... if is_first:
- ... continue # Skip the header
- ... if is_last:
- ... continue # Skip the footer
- ... total += item
- >>> print(total)
- 300
- """
- it = iter(iterable)
-
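-    # Keep one item of lookahead in `b`: item `a` is only yielded after its
-    # successor has been fetched, so `is_last` is known exactly.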
- try:
- b = next(it)
- except StopIteration:
- return
-
- try:
- for i in count():
- a = b
- b = next(it)
- yield i == 0, False, a
-
- except StopIteration:
- yield i == 0, True, a
-
-
-def locate(iterable, pred=bool, window_size=None):
- """Yield the index of each item in *iterable* for which *pred* returns
- ``True``.
-
- *pred* defaults to :func:`bool`, which will select truthy items:
-
- >>> list(locate([0, 1, 1, 0, 1, 0, 0]))
- [1, 2, 4]
-
- Set *pred* to a custom function to, e.g., find the indexes for a particular
- item.
-
- >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
- [1, 3]
-
- If *window_size* is given, then the *pred* function will be called with
- that many items. This enables searching for sub-sequences:
-
- >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
- >>> pred = lambda *args: args == (1, 2, 3)
- >>> list(locate(iterable, pred=pred, window_size=3))
- [1, 5, 9]
-
- Use with :func:`seekable` to find indexes and then retrieve the associated
- items:
-
- >>> from itertools import count
- >>> from more_itertools import seekable
- >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
- >>> it = seekable(source)
- >>> pred = lambda x: x > 100
- >>> indexes = locate(it, pred=pred)
- >>> i = next(indexes)
- >>> it.seek(i)
- >>> next(it)
- 106
-
- """
- if window_size is None:
- return compress(count(), map(pred, iterable))
-
- if window_size < 1:
- raise ValueError('window size must be at least 1')
-
- it = windowed(iterable, window_size, fillvalue=_marker)
- return compress(count(), starmap(pred, it))
-
-
-def lstrip(iterable, pred):
- """Yield the items from *iterable*, but strip any from the beginning
- for which *pred* returns ``True``.
-
- For example, to remove a set of items from the start of an iterable:
-
- >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
- >>> pred = lambda x: x in {None, False, ''}
- >>> list(lstrip(iterable, pred))
- [1, 2, None, 3, False, None]
-
-    This function is analogous to :func:`str.lstrip`, and is essentially
-    a wrapper for :func:`itertools.dropwhile`.
-
- """
- return dropwhile(pred, iterable)
-
-
-def rstrip(iterable, pred):
- """Yield the items from *iterable*, but strip any from the end
- for which *pred* returns ``True``.
-
- For example, to remove a set of items from the end of an iterable:
-
- >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
- >>> pred = lambda x: x in {None, False, ''}
- >>> list(rstrip(iterable, pred))
- [None, False, None, 1, 2, None, 3]
-
- This function is analogous to :func:`str.rstrip`.
-
- """
- cache = []
- cache_append = cache.append
- cache_clear = cache.clear
- for x in iterable:
- if pred(x):
- cache_append(x)
- else:
- yield from cache
- cache_clear()
- yield x
-
-
-def strip(iterable, pred):
- """Yield the items from *iterable*, but strip any from the
- beginning and end for which *pred* returns ``True``.
-
- For example, to remove a set of items from both ends of an iterable:
-
- >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
- >>> pred = lambda x: x in {None, False, ''}
- >>> list(strip(iterable, pred))
- [1, 2, None, 3]
-
- This function is analogous to :func:`str.strip`.
-
- """
- return rstrip(lstrip(iterable, pred), pred)
-
-
-class islice_extended:
- """An extension of :func:`itertools.islice` that supports negative values
- for *stop*, *start*, and *step*.
-
- >>> iterable = iter('abcdefgh')
- >>> list(islice_extended(iterable, -4, -1))
- ['e', 'f', 'g']
-
- Slices with negative values require some caching of *iterable*, but this
- function takes care to minimize the amount of memory required.
-
- For example, you can use a negative step with an infinite iterator:
-
- >>> from itertools import count
- >>> list(islice_extended(count(), 110, 99, -2))
- [110, 108, 106, 104, 102, 100]
-
- You can also use slice notation directly:
-
- >>> iterable = map(str, count())
- >>> it = islice_extended(iterable)[10:20:2]
- >>> list(it)
- ['10', '12', '14', '16', '18']
-
- """
-
- def __init__(self, iterable, *args):
- it = iter(iterable)
- if args:
- self._iterable = _islice_helper(it, slice(*args))
- else:
- self._iterable = it
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return next(self._iterable)
-
- def __getitem__(self, key):
- if isinstance(key, slice):
- return islice_extended(_islice_helper(self._iterable, key))
-
- raise TypeError('islice_extended.__getitem__ argument must be a slice')
-
-
-def _islice_helper(it, s):
- start = s.start
- stop = s.stop
- if s.step == 0:
- raise ValueError('step argument must be a non-zero integer or None.')
- step = s.step or 1
-
- if step > 0:
- start = 0 if (start is None) else start
-
- if start < 0:
- # Consume all but the last -start items
- cache = deque(enumerate(it, 1), maxlen=-start)
- len_iter = cache[-1][0] if cache else 0
-
- # Adjust start to be positive
- i = max(len_iter + start, 0)
-
- # Adjust stop to be positive
- if stop is None:
- j = len_iter
- elif stop >= 0:
- j = min(stop, len_iter)
- else:
- j = max(len_iter + stop, 0)
-
- # Slice the cache
- n = j - i
- if n <= 0:
- return
-
- for index, item in islice(cache, 0, n, step):
- yield item
- elif (stop is not None) and (stop < 0):
- # Advance to the start position
- next(islice(it, start, start), None)
-
- # When stop is negative, we have to carry -stop items while
- # iterating
- cache = deque(islice(it, -stop), maxlen=-stop)
-
- for index, item in enumerate(it):
- cached_item = cache.popleft()
- if index % step == 0:
- yield cached_item
- cache.append(item)
- else:
- # When both start and stop are positive we have the normal case
- yield from islice(it, start, stop, step)
- else:
- start = -1 if (start is None) else start
-
- if (stop is not None) and (stop < 0):
-            # Consume all but the last n items
- n = -stop - 1
- cache = deque(enumerate(it, 1), maxlen=n)
- len_iter = cache[-1][0] if cache else 0
-
- # If start and stop are both negative they are comparable and
- # we can just slice. Otherwise we can adjust start to be negative
- # and then slice.
- if start < 0:
- i, j = start, stop
- else:
- i, j = min(start - len_iter, -1), None
-
- for index, item in list(cache)[i:j:step]:
- yield item
- else:
- # Advance to the stop position
- if stop is not None:
- m = stop + 1
- next(islice(it, m, m), None)
-
- # stop is positive, so if start is negative they are not comparable
- # and we need the rest of the items.
- if start < 0:
- i = start
- n = None
- # stop is None and start is positive, so we just need items up to
- # the start index.
- elif stop is None:
- i = None
- n = start + 1
- # Both stop and start are positive, so they are comparable.
- else:
- i = None
- n = start - stop
- if n <= 0:
- return
-
- cache = list(islice(it, n))
-
- yield from cache[i::step]
-
-
-def always_reversible(iterable):
- """An extension of :func:`reversed` that supports all iterables, not
- just those which implement the ``Reversible`` or ``Sequence`` protocols.
-
- >>> print(*always_reversible(x for x in range(3)))
- 2 1 0
-
- If the iterable is already reversible, this function returns the
- result of :func:`reversed()`. If the iterable is not reversible,
- this function will cache the remaining items in the iterable and
- yield them in reverse order, which may require significant storage.
- """
- try:
- return reversed(iterable)
- except TypeError:
- return reversed(list(iterable))
-
-
-def consecutive_groups(iterable, ordering=lambda x: x):
- """Yield groups of consecutive items using :func:`itertools.groupby`.
- The *ordering* function determines whether two items are adjacent by
- returning their position.
-
- By default, the ordering function is the identity function. This is
- suitable for finding runs of numbers:
-
- >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
- >>> for group in consecutive_groups(iterable):
- ... print(list(group))
- [1]
- [10, 11, 12]
- [20]
- [30, 31, 32, 33]
- [40]
-
- For finding runs of adjacent letters, try using the :meth:`index` method
- of a string of letters:
-
- >>> from string import ascii_lowercase
- >>> iterable = 'abcdfgilmnop'
- >>> ordering = ascii_lowercase.index
- >>> for group in consecutive_groups(iterable, ordering):
- ... print(list(group))
- ['a', 'b', 'c', 'd']
- ['f', 'g']
- ['i']
- ['l', 'm', 'n', 'o', 'p']
-
-    Each group of consecutive items is an iterator that shares its source with
-    *iterable*. When an output group is advanced, the previous group is
-    no longer available unless its elements are copied (e.g., into a ``list``).
-
- >>> iterable = [1, 2, 11, 12, 21, 22]
- >>> saved_groups = []
- >>> for group in consecutive_groups(iterable):
- ... saved_groups.append(list(group)) # Copy group elements
- >>> saved_groups
- [[1, 2], [11, 12], [21, 22]]
-
- """
- for k, g in groupby(
- enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
- ):
- yield map(itemgetter(1), g)
-
-
-def difference(iterable, func=sub, *, initial=None):
- """This function is the inverse of :func:`itertools.accumulate`. By default
- it will compute the first difference of *iterable* using
- :func:`operator.sub`:
-
- >>> from itertools import accumulate
- >>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10
- >>> list(difference(iterable))
- [0, 1, 2, 3, 4]
-
- *func* defaults to :func:`operator.sub`, but other functions can be
- specified. They will be applied as follows::
-
- A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
-
- For example, to do progressive division:
-
- >>> iterable = [1, 2, 6, 24, 120]
- >>> func = lambda x, y: x // y
- >>> list(difference(iterable, func))
- [1, 2, 3, 4, 5]
-
- If the *initial* keyword is set, the first element will be skipped when
- computing successive differences.
-
- >>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10)
- >>> list(difference(it, initial=10))
- [1, 2, 3]
-
- """
- a, b = tee(iterable)
- try:
- first = [next(b)]
- except StopIteration:
- return iter([])
-
- if initial is not None:
- first = []
-
- return chain(first, starmap(func, zip(b, a)))
-
-
-class SequenceView(Sequence):
- """Return a read-only view of the sequence object *target*.
-
- :class:`SequenceView` objects are analogous to Python's built-in
- "dictionary view" types. They provide a dynamic view of a sequence's items,
- meaning that when the sequence updates, so does the view.
-
- >>> seq = ['0', '1', '2']
- >>> view = SequenceView(seq)
- >>> view
- SequenceView(['0', '1', '2'])
- >>> seq.append('3')
- >>> view
- SequenceView(['0', '1', '2', '3'])
-
- Sequence views support indexing, slicing, and length queries. They act
- like the underlying sequence, except they don't allow assignment:
-
- >>> view[1]
- '1'
- >>> view[1:-1]
- ['1', '2']
- >>> len(view)
- 4
-
- Sequence views are useful as an alternative to copying, as they don't
- require (much) extra storage.
-
- """
-
- def __init__(self, target):
- if not isinstance(target, Sequence):
- raise TypeError
- self._target = target
-
- def __getitem__(self, index):
- return self._target[index]
-
- def __len__(self):
- return len(self._target)
-
- def __repr__(self):
- return '{}({})'.format(self.__class__.__name__, repr(self._target))
-
-
-class seekable:
- """Wrap an iterator to allow for seeking backward and forward. This
- progressively caches the items in the source iterable so they can be
- re-visited.
-
- Call :meth:`seek` with an index to seek to that position in the source
- iterable.
-
- To "reset" an iterator, seek to ``0``:
-
- >>> from itertools import count
- >>> it = seekable((str(n) for n in count()))
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
- >>> it.seek(0)
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
- >>> next(it)
- '3'
-
- You can also seek forward:
-
- >>> it = seekable((str(n) for n in range(20)))
- >>> it.seek(10)
- >>> next(it)
- '10'
- >>> it.seek(20) # Seeking past the end of the source isn't a problem
- >>> list(it)
- []
- >>> it.seek(0) # Resetting works even after hitting the end
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
-
- Call :meth:`peek` to look ahead one item without advancing the iterator:
-
- >>> it = seekable('1234')
- >>> it.peek()
- '1'
- >>> list(it)
- ['1', '2', '3', '4']
- >>> it.peek(default='empty')
- 'empty'
-
- Before the iterator is at its end, calling :func:`bool` on it will return
- ``True``. After it will return ``False``:
-
- >>> it = seekable('5678')
- >>> bool(it)
- True
- >>> list(it)
- ['5', '6', '7', '8']
- >>> bool(it)
- False
-
- You may view the contents of the cache with the :meth:`elements` method.
- That returns a :class:`SequenceView`, a view that updates automatically:
-
- >>> it = seekable((str(n) for n in range(10)))
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
- >>> elements = it.elements()
- >>> elements
- SequenceView(['0', '1', '2'])
- >>> next(it)
- '3'
- >>> elements
- SequenceView(['0', '1', '2', '3'])
-
- By default, the cache grows as the source iterable progresses, so beware of
- wrapping very large or infinite iterables. Supply *maxlen* to limit the
- size of the cache (this of course limits how far back you can seek).
-
- >>> from itertools import count
- >>> it = seekable((str(n) for n in count()), maxlen=2)
- >>> next(it), next(it), next(it), next(it)
- ('0', '1', '2', '3')
- >>> list(it.elements())
- ['2', '3']
- >>> it.seek(0)
- >>> next(it), next(it), next(it), next(it)
- ('2', '3', '4', '5')
- >>> next(it)
- '6'
-
- """
-
- def __init__(self, iterable, maxlen=None):
- self._source = iter(iterable)
- if maxlen is None:
- self._cache = []
- else:
- self._cache = deque([], maxlen)
- self._index = None
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._index is not None:
- try:
- item = self._cache[self._index]
- except IndexError:
- self._index = None
- else:
- self._index += 1
- return item
-
- item = next(self._source)
- self._cache.append(item)
- return item
-
- def __bool__(self):
- try:
- self.peek()
- except StopIteration:
- return False
- return True
-
- def peek(self, default=_marker):
- try:
- peeked = next(self)
- except StopIteration:
- if default is _marker:
- raise
- return default
- if self._index is None:
- self._index = len(self._cache)
- self._index -= 1
- return peeked
-
- def elements(self):
- return SequenceView(self._cache)
-
- def seek(self, index):
- self._index = index
- remainder = index - len(self._cache)
- if remainder > 0:
- consume(self, remainder)
-
-
-class run_length:
- """
- :func:`run_length.encode` compresses an iterable with run-length encoding.
- It yields groups of repeated items with the count of how many times they
- were repeated:
-
- >>> uncompressed = 'abbcccdddd'
- >>> list(run_length.encode(uncompressed))
- [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
-
- :func:`run_length.decode` decompresses an iterable that was previously
- compressed with run-length encoding. It yields the items of the
- decompressed iterable:
-
- >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
- >>> list(run_length.decode(compressed))
- ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
-
- """
-
- @staticmethod
- def encode(iterable):
- return ((k, ilen(g)) for k, g in groupby(iterable))
-
- @staticmethod
- def decode(iterable):
- return chain.from_iterable(repeat(k, n) for k, n in iterable)
-
-
-def exactly_n(iterable, n, predicate=bool):
- """Return ``True`` if exactly ``n`` items in the iterable are ``True``
- according to the *predicate* function.
-
- >>> exactly_n([True, True, False], 2)
- True
- >>> exactly_n([True, True, False], 1)
- False
- >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
- True
-
- The iterable will be advanced until ``n + 1`` truthy items are encountered,
- so avoid calling it on infinite iterables.
-
- """
- return len(take(n + 1, filter(predicate, iterable))) == n
-
-
-def circular_shifts(iterable):
- """Return a list of circular shifts of *iterable*.
-
- >>> circular_shifts(range(4))
- [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
- """
- lst = list(iterable)
- return take(len(lst), windowed(cycle(lst), len(lst)))
-
-
-def make_decorator(wrapping_func, result_index=0):
- """Return a decorator version of *wrapping_func*, which is a function that
- modifies an iterable. *result_index* is the position in that function's
- signature where the iterable goes.
-
- This lets you use itertools on the "production end," i.e. at function
- definition. This can augment what the function returns without changing the
- function's code.
-
- For example, to produce a decorator version of :func:`chunked`:
-
- >>> from more_itertools import chunked
- >>> chunker = make_decorator(chunked, result_index=0)
- >>> @chunker(3)
- ... def iter_range(n):
- ... return iter(range(n))
- ...
- >>> list(iter_range(9))
- [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
-
- To only allow truthy items to be returned:
-
- >>> truth_serum = make_decorator(filter, result_index=1)
- >>> @truth_serum(bool)
- ... def boolean_test():
- ... return [0, 1, '', ' ', False, True]
- ...
- >>> list(boolean_test())
- [1, ' ', True]
-
- The :func:`peekable` and :func:`seekable` wrappers make for practical
- decorators:
-
- >>> from more_itertools import peekable
- >>> peekable_function = make_decorator(peekable)
- >>> @peekable_function()
- ... def str_range(*args):
- ... return (str(x) for x in range(*args))
- ...
- >>> it = str_range(1, 20, 2)
- >>> next(it), next(it), next(it)
- ('1', '3', '5')
- >>> it.peek()
- '7'
- >>> next(it)
- '7'
-
- """
- # See https://sites.google.com/site/bbayles/index/decorator_factory for
- # notes on how this works.
- def decorator(*wrapping_args, **wrapping_kwargs):
- def outer_wrapper(f):
- def inner_wrapper(*args, **kwargs):
- result = f(*args, **kwargs)
- wrapping_args_ = list(wrapping_args)
- wrapping_args_.insert(result_index, result)
- return wrapping_func(*wrapping_args_, **wrapping_kwargs)
-
- return inner_wrapper
-
- return outer_wrapper
-
- return decorator
-
-
-def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
- """Return a dictionary that maps the items in *iterable* to categories
- defined by *keyfunc*, transforms them with *valuefunc*, and
- then summarizes them by category with *reducefunc*.
-
- *valuefunc* defaults to the identity function if it is unspecified.
- If *reducefunc* is unspecified, no summarization takes place:
-
- >>> keyfunc = lambda x: x.upper()
- >>> result = map_reduce('abbccc', keyfunc)
- >>> sorted(result.items())
- [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
-
- Specifying *valuefunc* transforms the categorized items:
-
- >>> keyfunc = lambda x: x.upper()
- >>> valuefunc = lambda x: 1
- >>> result = map_reduce('abbccc', keyfunc, valuefunc)
- >>> sorted(result.items())
- [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
-
- Specifying *reducefunc* summarizes the categorized items:
-
- >>> keyfunc = lambda x: x.upper()
- >>> valuefunc = lambda x: 1
- >>> reducefunc = sum
- >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
- >>> sorted(result.items())
- [('A', 1), ('B', 2), ('C', 3)]
-
- You may want to filter the input iterable before applying the map/reduce
- procedure:
-
- >>> all_items = range(30)
- >>> items = [x for x in all_items if 10 <= x <= 20] # Filter
- >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
- >>> categories = map_reduce(items, keyfunc=keyfunc)
- >>> sorted(categories.items())
- [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
- >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
- >>> sorted(summaries.items())
- [(0, 90), (1, 75)]
-
- Note that all items in the iterable are gathered into a list before the
- summarization step, which may require significant storage.
-
- The returned object is a :obj:`collections.defaultdict` with the
- ``default_factory`` set to ``None``, such that it behaves like a normal
- dictionary.
-
- """
- valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
-
- ret = defaultdict(list)
- for item in iterable:
- key = keyfunc(item)
- value = valuefunc(item)
- ret[key].append(value)
-
- if reducefunc is not None:
- for key, value_list in ret.items():
- ret[key] = reducefunc(value_list)
-
- ret.default_factory = None
- return ret
-
-
-def rlocate(iterable, pred=bool, window_size=None):
- """Yield the index of each item in *iterable* for which *pred* returns
- ``True``, starting from the right and moving left.
-
- *pred* defaults to :func:`bool`, which will select truthy items:
-
- >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
- [4, 2, 1]
-
- Set *pred* to a custom function to, e.g., find the indexes for a particular
- item:
-
- >>> iterable = iter('abcb')
- >>> pred = lambda x: x == 'b'
- >>> list(rlocate(iterable, pred))
- [3, 1]
-
- If *window_size* is given, then the *pred* function will be called with
- that many items. This enables searching for sub-sequences:
-
- >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
- >>> pred = lambda *args: args == (1, 2, 3)
- >>> list(rlocate(iterable, pred=pred, window_size=3))
- [9, 5, 1]
-
- Beware, this function won't return anything for infinite iterables.
- If *iterable* is reversible, ``rlocate`` will reverse it and search from
- the right. Otherwise, it will search from the left and return the results
- in reverse order.
-
- See :func:`locate` for other example applications.
-
- """
- if window_size is None:
- try:
- len_iter = len(iterable)
- return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
- except TypeError:
- pass
-
- return reversed(list(locate(iterable, pred, window_size)))
-
-
-def replace(iterable, pred, substitutes, count=None, window_size=1):
- """Yield the items from *iterable*, replacing the items for which *pred*
- returns ``True`` with the items from the iterable *substitutes*.
-
- >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
- >>> pred = lambda x: x == 0
- >>> substitutes = (2, 3)
- >>> list(replace(iterable, pred, substitutes))
- [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
-
- If *count* is given, the number of replacements will be limited:
-
- >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
- >>> pred = lambda x: x == 0
- >>> substitutes = [None]
- >>> list(replace(iterable, pred, substitutes, count=2))
- [1, 1, None, 1, 1, None, 1, 1, 0]
-
- Use *window_size* to control the number of items passed as arguments to
- *pred*. This allows for locating and replacing subsequences.
-
- >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
- >>> window_size = 3
- >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
- >>> substitutes = [3, 4] # Splice in these items
- >>> list(replace(iterable, pred, substitutes, window_size=window_size))
- [3, 4, 5, 3, 4, 5]
-
- """
- if window_size < 1:
- raise ValueError('window_size must be at least 1')
-
- # Save the substitutes iterable, since it's used more than once
- substitutes = tuple(substitutes)
-
- # Add padding such that the number of windows matches the length of the
- # iterable
- it = chain(iterable, [_marker] * (window_size - 1))
- windows = windowed(it, window_size)
-
- n = 0
- for w in windows:
- # If the current window matches our predicate (and we haven't hit
- # our maximum number of replacements), splice in the substitutes
- # and then consume the following windows that overlap with this one.
- # For example, if the iterable is (0, 1, 2, 3, 4...)
- # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
- # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
- if pred(*w):
- if (count is None) or (n < count):
- n += 1
- yield from substitutes
- consume(windows, window_size - 1)
- continue
-
- # If there was no match (or we've reached the replacement limit),
- # yield the first item from the window.
- if w and (w[0] is not _marker):
- yield w[0]
-
-
-def partitions(iterable):
- """Yield all possible order-preserving partitions of *iterable*.
-
- >>> iterable = 'abc'
- >>> for part in partitions(iterable):
- ... print([''.join(p) for p in part])
- ['abc']
- ['a', 'bc']
- ['ab', 'c']
- ['a', 'b', 'c']
-
- This is unrelated to :func:`partition`.
-
- """
- sequence = list(iterable)
- n = len(sequence)
- for i in powerset(range(1, n)):
- yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
-
-
-def set_partitions(iterable, k=None):
- """
- Yield the set partitions of *iterable* into *k* parts. Set partitions are
- not order-preserving.
-
- >>> iterable = 'abc'
- >>> for part in set_partitions(iterable, 2):
- ... print([''.join(p) for p in part])
- ['a', 'bc']
- ['ab', 'c']
- ['b', 'ac']
-
-
- If *k* is not given, every set partition is generated.
-
- >>> iterable = 'abc'
- >>> for part in set_partitions(iterable):
- ... print([''.join(p) for p in part])
- ['abc']
- ['a', 'bc']
- ['ab', 'c']
- ['b', 'ac']
- ['a', 'b', 'c']
-
- """
- L = list(iterable)
- n = len(L)
- if k is not None:
- if k < 1:
- raise ValueError(
- "Can't partition in a negative or zero number of groups"
- )
- elif k > n:
- return
-
- def set_partitions_helper(L, k):
- n = len(L)
- if k == 1:
- yield [L]
- elif n == k:
- yield [[s] for s in L]
- else:
- e, *M = L
- for p in set_partitions_helper(M, k - 1):
- yield [[e], *p]
- for p in set_partitions_helper(M, k):
- for i in range(len(p)):
- yield p[:i] + [[e] + p[i]] + p[i + 1 :]
-
- if k is None:
- for k in range(1, n + 1):
- yield from set_partitions_helper(L, k)
- else:
- yield from set_partitions_helper(L, k)
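
The helper's recursion mirrors the standard identity for Stirling numbers of the second kind, S(n, k) = S(n-1, k-1) + k * S(n-1, k): the element `e` either forms its own block or joins one of the blocks of a smaller partition. A minimal count check (assuming the `more_itertools` package is installed):

```python
from more_itertools import set_partitions

# Partition counts should reproduce Stirling numbers of the second kind
# and, summed over all k, the Bell numbers.
assert sum(1 for _ in set_partitions('abcd', 2)) == 7   # S(4, 2)
assert sum(1 for _ in set_partitions('abcd', 3)) == 6   # S(4, 3)
assert sum(1 for _ in set_partitions('abcd')) == 15     # Bell number B(4)
```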
-
-
-class time_limited:
- """
- Yield items from *iterable* until *limit_seconds* have passed.
- If the time limit expires before all items have been yielded, the
- ``timed_out`` parameter will be set to ``True``.
-
- >>> from time import sleep
- >>> def generator():
- ... yield 1
- ... yield 2
- ... sleep(0.2)
- ... yield 3
- >>> iterable = time_limited(0.1, generator())
- >>> list(iterable)
- [1, 2]
- >>> iterable.timed_out
- True
-
- Note that the time is checked before each item is yielded, and iteration
- stops if the time elapsed is greater than *limit_seconds*. If your time
- limit is 1 second, but it takes 2 seconds to generate the first item from
- the iterable, the function will run for 2 seconds and not yield anything.
-
- """
-
- def __init__(self, limit_seconds, iterable):
- if limit_seconds < 0:
- raise ValueError('limit_seconds must be non-negative')
- self.limit_seconds = limit_seconds
- self._iterable = iter(iterable)
- self._start_time = monotonic()
- self.timed_out = False
-
- def __iter__(self):
- return self
-
- def __next__(self):
- item = next(self._iterable)
- if monotonic() - self._start_time > self.limit_seconds:
- self.timed_out = True
- raise StopIteration
-
- return item
-
-
-def only(iterable, default=None, too_long=None):
- """If *iterable* has only one item, return it.
- If it has zero items, return *default*.
- If it has more than one item, raise the exception given by *too_long*,
- which is ``ValueError`` by default.
-
- >>> only([], default='missing')
- 'missing'
- >>> only([1])
- 1
- >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ValueError: Expected exactly one item in iterable, but got 1, 2,
- and perhaps more.
- >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- TypeError
-
- Note that :func:`only` attempts to advance *iterable* twice to ensure there
- is only one item. See :func:`spy` or :func:`peekable` to check
- iterable contents less destructively.
- """
- it = iter(iterable)
- first_value = next(it, default)
-
- try:
- second_value = next(it)
- except StopIteration:
- pass
- else:
- msg = (
- 'Expected exactly one item in iterable, but got {!r}, {!r}, '
- 'and perhaps more.'.format(first_value, second_value)
- )
- raise too_long or ValueError(msg)
-
- return first_value
-
-
-def ichunked(iterable, n):
- """Break *iterable* into sub-iterables with *n* elements each.
- :func:`ichunked` is like :func:`chunked`, but it yields iterables
- instead of lists.
-
- If the sub-iterables are read in order, the elements of *iterable*
- won't be stored in memory.
- If they are read out of order, :func:`itertools.tee` is used to cache
- elements as necessary.
-
- >>> from itertools import count
- >>> all_chunks = ichunked(count(), 4)
- >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
- >>> list(c_2) # c_1's elements have been cached; c_3's haven't been
- [4, 5, 6, 7]
- >>> list(c_1)
- [0, 1, 2, 3]
- >>> list(c_3)
- [8, 9, 10, 11]
-
- """
- source = iter(iterable)
-
- while True:
- # Check to see whether we're at the end of the source iterable
- item = next(source, _marker)
- if item is _marker:
- return
-
- # Clone the source and yield an n-length slice
- source, it = tee(chain([item], source))
- yield islice(it, n)
-
- # Advance the source iterable
- consume(source, n)
-
-
-def distinct_combinations(iterable, r):
- """Yield the distinct combinations of *r* items taken from *iterable*.
-
- >>> list(distinct_combinations([0, 0, 1], 2))
- [(0, 0), (0, 1)]
-
- Equivalent to ``set(combinations(iterable, r))``, except duplicates are not
- generated and thrown away. For larger input sequences this is much more
- efficient.
-
- """
- if r < 0:
- raise ValueError('r must be non-negative')
- elif r == 0:
- yield ()
- return
- pool = tuple(iterable)
- generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
- current_combo = [None] * r
- level = 0
- while generators:
- try:
- cur_idx, p = next(generators[-1])
- except StopIteration:
- generators.pop()
- level -= 1
- continue
- current_combo[level] = p
- if level + 1 == r:
- yield tuple(current_combo)
- else:
- generators.append(
- unique_everseen(
- enumerate(pool[cur_idx + 1 :], cur_idx + 1),
- key=itemgetter(1),
- )
- )
- level += 1
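
The stack of `unique_everseen` generators above walks the combination tree while skipping duplicate values at each level, so no duplicate tuple is ever produced and discarded. A quick property check against the naive approach (assuming `more_itertools` is installed):

```python
from itertools import combinations

from more_itertools import distinct_combinations

# For an input with duplicates, the output should match deduplicating
# itertools.combinations after the fact, for every combination size r.
pool = [0, 0, 1, 1, 2]
for r in range(len(pool) + 1):
    assert sorted(distinct_combinations(pool, r)) == sorted(set(combinations(pool, r)))
```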
-
-
-def filter_except(validator, iterable, *exceptions):
- """Yield the items from *iterable* for which the *validator* function does
- not raise one of the specified *exceptions*.
-
- *validator* is called for each item in *iterable*.
- It should be a function that accepts one argument and raises an exception
- if that item is not valid.
-
- >>> iterable = ['1', '2', 'three', '4', None]
- >>> list(filter_except(int, iterable, ValueError, TypeError))
- ['1', '2', '4']
-
- If an exception other than one given by *exceptions* is raised by
- *validator*, it is raised like normal.
- """
- for item in iterable:
- try:
- validator(item)
- except exceptions:
- pass
- else:
- yield item
-
-
-def map_except(function, iterable, *exceptions):
- """Transform each item from *iterable* with *function* and yield the
- result, unless *function* raises one of the specified *exceptions*.
-
- *function* is called to transform each item in *iterable*.
- It should accept one argument.
-
- >>> iterable = ['1', '2', 'three', '4', None]
- >>> list(map_except(int, iterable, ValueError, TypeError))
- [1, 2, 4]
-
- If an exception other than one given by *exceptions* is raised by
- *function*, it is raised like normal.
- """
- for item in iterable:
- try:
- yield function(item)
- except exceptions:
- pass
-
-
-def map_if(iterable, pred, func, func_else=lambda x: x):
- """Evaluate each item from *iterable* using *pred*. If the result is
- equivalent to ``True``, transform the item with *func* and yield it.
- Otherwise, transform the item with *func_else* and yield it.
-
- *pred*, *func*, and *func_else* should each be functions that accept
- one argument. By default, *func_else* is the identity function.
-
- >>> from math import sqrt
- >>> iterable = list(range(-5, 5))
- >>> iterable
- [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
- >>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig'))
- [-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig']
- >>> list(map_if(iterable, lambda x: x >= 0,
- ... lambda x: f'{sqrt(x):.2f}', lambda x: None))
- [None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00']
- """
- for item in iterable:
- yield func(item) if pred(item) else func_else(item)
-
-
-def _sample_unweighted(iterable, k):
- # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
- # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
-
- # Fill up the reservoir (collection of samples) with the first `k` samples
- reservoir = take(k, iterable)
-
- # Generate random number that's the largest in a sample of k U(0,1) numbers
- # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
- W = exp(log(random()) / k)
-
- # The number of elements to skip before changing the reservoir is a random
- # number with a geometric distribution. Sample it using random() and logs.
- next_index = k + floor(log(random()) / log(1 - W))
-
- for index, element in enumerate(iterable, k):
-
- if index == next_index:
- reservoir[randrange(k)] = element
- # The new W is the largest in a sample of k U(0, `old_W`) numbers
- W *= exp(log(random()) / k)
- next_index += floor(log(random()) / log(1 - W)) + 1
-
- return reservoir
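
Algorithm L draws the skip distances directly from a geometric distribution, so only a handful of `random()` calls are needed even for very long streams. A small usage sketch via the public `sample` wrapper (assuming `more_itertools` is installed; the seed is arbitrary and only makes the run reproducible):

```python
import random

from more_itertools import sample

random.seed(1234)  # arbitrary seed, for reproducibility only
stream = (x * x for x in range(100_000))  # single pass; length unknown to sample()
picked = sample(stream, 5)
assert len(picked) == 5  # a uniform 5-element reservoir from the stream
print(picked)
```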
-
-
-def _sample_weighted(iterable, k, weights):
- # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
- # "Weighted random sampling with a reservoir".
-
- # Log-transform for numerical stability for weights that are small/large
- weight_keys = (log(random()) / weight for weight in weights)
-
- # Fill up the reservoir (collection of samples) with the first `k`
- # weight-keys and elements, then heapify the list.
- reservoir = take(k, zip(weight_keys, iterable))
- heapify(reservoir)
-
- # The number of jumps before changing the reservoir is a random variable
- # with an exponential distribution. Sample it using random() and logs.
- smallest_weight_key, _ = reservoir[0]
- weights_to_skip = log(random()) / smallest_weight_key
-
- for weight, element in zip(weights, iterable):
- if weight >= weights_to_skip:
- # The notation here is consistent with the paper, but we store
- # the weight-keys in log-space for better numerical stability.
- smallest_weight_key, _ = reservoir[0]
- t_w = exp(weight * smallest_weight_key)
- r_2 = uniform(t_w, 1) # generate U(t_w, 1)
- weight_key = log(r_2) / weight
- heapreplace(reservoir, (weight_key, element))
- smallest_weight_key, _ = reservoir[0]
- weights_to_skip = log(random()) / smallest_weight_key
- else:
- weights_to_skip -= weight
-
- # Equivalent to [element for weight_key, element in sorted(reservoir)]
- return [heappop(reservoir)[1] for _ in range(k)]
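
In A-ExpJ each item's key is `log(U) / w`, and keeping the k largest keys yields a weighted sample, so heavier items should be selected more often across repeated runs. A rough empirical check (assuming `more_itertools` is installed; the counts are statistical, not exact):

```python
from collections import Counter

from more_itertools import sample

# 'c' carries half of the total weight, so it should be drawn in roughly
# half of the trials.
counts = Counter()
for _ in range(2_000):
    counts.update(sample('abc', 1, weights=[1, 1, 2]))
print(counts)  # e.g. Counter({'c': ~1000, 'a': ~500, 'b': ~500})
```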
-
-
-def sample(iterable, k, weights=None):
- """Return a *k*-length list of elements chosen (without replacement)
- from the *iterable*. Like :func:`random.sample`, but works on iterables
- of unknown length.
-
- >>> iterable = range(100)
- >>> sample(iterable, 5) # doctest: +SKIP
- [81, 60, 96, 16, 4]
-
- An iterable with *weights* may also be given:
-
- >>> iterable = range(100)
- >>> weights = (i * i + 1 for i in range(100))
- >>> sample(iterable, 5, weights=weights) # doctest: +SKIP
- [79, 67, 74, 66, 78]
-
- The algorithm can also be used to generate weighted random permutations.
- The relative weight of each item determines the probability that it
- appears late in the permutation.
-
- >>> data = "abcdefgh"
- >>> weights = range(1, len(data) + 1)
- >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP
- ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
- """
- if k == 0:
- return []
-
- iterable = iter(iterable)
- if weights is None:
- return _sample_unweighted(iterable, k)
- else:
- weights = iter(weights)
- return _sample_weighted(iterable, k, weights)
-
-
-def is_sorted(iterable, key=None, reverse=False, strict=False):
- """Returns ``True`` if the items of iterable are in sorted order, and
- ``False`` otherwise. *key* and *reverse* have the same meaning that they do
- in the built-in :func:`sorted` function.
-
- >>> is_sorted(['1', '2', '3', '4', '5'], key=int)
- True
- >>> is_sorted([5, 4, 3, 1, 2], reverse=True)
- False
-
- If *strict*, tests for strict sorting, that is, returns ``False`` if equal
- elements are found:
-
- >>> is_sorted([1, 2, 2])
- True
- >>> is_sorted([1, 2, 2], strict=True)
- False
-
- The function returns ``False`` after encountering the first out-of-order
- item. If there are no out-of-order items, the iterable is exhausted.
- """
-
- compare = (le if reverse else ge) if strict else (lt if reverse else gt)
- it = iterable if key is None else map(key, iterable)
- return not any(starmap(compare, pairwise(it)))
-
-
-class AbortThread(BaseException):
- pass
-
-
-class callback_iter:
- """Convert a function that uses callbacks to an iterator.
-
- Let *func* be a function that takes a `callback` keyword argument.
- For example:
-
- >>> def func(callback=None):
- ... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
- ... if callback:
- ... callback(i, c)
- ... return 4
-
-
- Use ``with callback_iter(func)`` to get an iterator over the parameters
- that are delivered to the callback.
-
- >>> with callback_iter(func) as it:
- ... for args, kwargs in it:
- ... print(args)
- (1, 'a')
- (2, 'b')
- (3, 'c')
-
- The function will be called in a background thread. The ``done`` property
- indicates whether it has completed execution.
-
- >>> it.done
- True
-
- If it completes successfully, its return value will be available
- in the ``result`` property.
-
- >>> it.result
- 4
-
- Notes:
-
- * If the function uses some keyword argument besides ``callback``, supply
- *callback_kwd*.
- * If it finished executing, but raised an exception, accessing the
- ``result`` property will raise the same exception.
- * If it hasn't finished executing, accessing the ``result``
- property from within the ``with`` block will raise ``RuntimeError``.
- * If it hasn't finished executing, accessing the ``result`` property from
- outside the ``with`` block will raise a
- ``more_itertools.AbortThread`` exception.
- * Provide *wait_seconds* to adjust how frequently it is polled for
- output.
-
- """
-
- def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
- self._func = func
- self._callback_kwd = callback_kwd
- self._aborted = False
- self._future = None
- self._wait_seconds = wait_seconds
- # __import__ returns the top-level 'concurrent' package; its 'futures'
- # submodule supplies the executor, imported here on first use.
- self._executor = __import__("concurrent.futures").futures.ThreadPoolExecutor(max_workers=1)
- self._iterator = self._reader()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self._aborted = True
- self._executor.shutdown()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return next(self._iterator)
-
- @property
- def done(self):
- if self._future is None:
- return False
- return self._future.done()
-
- @property
- def result(self):
- if not self.done:
- raise RuntimeError('Function has not yet completed')
-
- return self._future.result()
-
- def _reader(self):
- q = Queue()
-
- def callback(*args, **kwargs):
- if self._aborted:
- raise AbortThread('canceled by user')
-
- q.put((args, kwargs))
-
- self._future = self._executor.submit(
- self._func, **{self._callback_kwd: callback}
- )
-
- while True:
- try:
- item = q.get(timeout=self._wait_seconds)
- except Empty:
- pass
- else:
- q.task_done()
- yield item
-
- if self._future.done():
- break
-
- remaining = []
- while True:
- try:
- item = q.get_nowait()
- except Empty:
- break
- else:
- q.task_done()
- remaining.append(item)
- q.join()
- yield from remaining
-
-
-def windowed_complete(iterable, n):
- """
- Yield ``(beginning, middle, end)`` tuples, where:
-
- * Each ``middle`` has *n* items from *iterable*
- * Each ``beginning`` has the items before the ones in ``middle``
- * Each ``end`` has the items after the ones in ``middle``
-
- >>> iterable = range(7)
- >>> n = 3
- >>> for beginning, middle, end in windowed_complete(iterable, n):
- ... print(beginning, middle, end)
- () (0, 1, 2) (3, 4, 5, 6)
- (0,) (1, 2, 3) (4, 5, 6)
- (0, 1) (2, 3, 4) (5, 6)
- (0, 1, 2) (3, 4, 5) (6,)
- (0, 1, 2, 3) (4, 5, 6) ()
-
- Note that *n* must be at least 0 and at most equal to the length of
- *iterable*.
-
- This function will exhaust the iterable and may require significant
- storage.
- """
- if n < 0:
- raise ValueError('n must be >= 0')
-
- seq = tuple(iterable)
- size = len(seq)
-
- if n > size:
- raise ValueError('n must be <= len(seq)')
-
- for i in range(size - n + 1):
- beginning = seq[:i]
- middle = seq[i : i + n]
- end = seq[i + n :]
- yield beginning, middle, end
-
-
-def all_unique(iterable, key=None):
- """
- Returns ``True`` if all the elements of *iterable* are unique (no two
- elements are equal).
-
- >>> all_unique('ABCB')
- False
-
- If a *key* function is specified, it will be used to make comparisons.
-
- >>> all_unique('ABCb')
- True
- >>> all_unique('ABCb', str.lower)
- False
-
- The function returns as soon as the first non-unique element is
- encountered. Iterables with a mix of hashable and unhashable items can
- be used, but the function will be slower for unhashable items.
- """
- seenset = set()
- seenset_add = seenset.add
- seenlist = []
- seenlist_add = seenlist.append
- for element in map(key, iterable) if key else iterable:
- try:
- if element in seenset:
- return False
- seenset_add(element)
- except TypeError:
- if element in seenlist:
- return False
- seenlist_add(element)
- return True
-
-
-def nth_product(index, *args):
- """Equivalent to ``list(product(*args))[index]``.
-
- The products of *args* can be ordered lexicographically.
- :func:`nth_product` computes the product at sort position *index* without
- computing the previous products.
-
- >>> nth_product(8, range(2), range(2), range(2), range(2))
- (1, 0, 0, 0)
-
- ``IndexError`` will be raised if the given *index* is invalid.
- """
- pools = list(map(tuple, reversed(args)))
- ns = list(map(len, pools))
-
- c = reduce(mul, ns)
-
- if index < 0:
- index += c
-
- if not 0 <= index < c:
- raise IndexError
-
- result = []
- for pool, n in zip(pools, ns):
- result.append(pool[index % n])
- index //= n
-
- return tuple(reversed(result))
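
The loop above peels off the mixed-radix digits of *index*, least-significant pool first (hence the two `reversed` calls). A round-trip check against materializing the full product (assuming `more_itertools` is installed):

```python
from itertools import product

from more_itertools import nth_product

pools = (range(3), 'ab', range(4))
full = list(product(*pools))
# Every valid index should decode to the corresponding product tuple.
assert all(nth_product(i, *pools) == full[i] for i in range(len(full)))
```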
-
-
-def nth_permutation(iterable, r, index):
- """Equivalent to ``list(permutations(iterable, r))[index]```
-
- The subsequences of *iterable* that are of length *r* where order is
- important can be ordered lexicographically. :func:`nth_permutation`
- computes the subsequence at sort position *index* directly, without
- computing the previous subsequences.
-
- >>> nth_permutation('ghijk', 2, 5)
- ('h', 'i')
-
- ``ValueError`` will be raised if *r* is negative or greater than the length
- of *iterable*.
- ``IndexError`` will be raised if the given *index* is invalid.
- """
- pool = list(iterable)
- n = len(pool)
-
- if r is None or r == n:
- r, c = n, factorial(n)
- elif not 0 <= r < n:
- raise ValueError
- else:
- c = factorial(n) // factorial(n - r)
-
- if index < 0:
- index += c
-
- if not 0 <= index < c:
- raise IndexError
-
- if c == 0:
- return tuple()
-
- result = [0] * r
- q = index * factorial(n) // c if r < n else index
- for d in range(1, n + 1):
- q, i = divmod(q, d)
- if 0 <= n - d < r:
- result[n - d] = i
- if q == 0:
- break
-
- return tuple(map(pool.pop, result))
-
-
-def value_chain(*args):
- """Yield all arguments passed to the function in the same order in which
- they were passed. If an argument itself is iterable then iterate over its
- values.
-
- >>> list(value_chain(1, 2, 3, [4, 5, 6]))
- [1, 2, 3, 4, 5, 6]
-
- Binary and text strings are not considered iterable and are emitted
- as-is:
-
- >>> list(value_chain('12', '34', ['56', '78']))
- ['12', '34', '56', '78']
-
-
- Multiple levels of nesting are not flattened.
-
- """
- for value in args:
- if isinstance(value, (str, bytes)):
- yield value
- continue
- try:
- yield from value
- except TypeError:
- yield value
-
-
-def product_index(element, *args):
- """Equivalent to ``list(product(*args)).index(element)``
-
- The products of *args* can be ordered lexicographically.
- :func:`product_index` computes the first index of *element* without
- computing the previous products.
-
- >>> product_index([8, 2], range(10), range(5))
- 42
-
- ``ValueError`` will be raised if the given *element* isn't in the product
- of *args*.
- """
- index = 0
-
- for x, pool in zip_longest(element, args, fillvalue=_marker):
- if x is _marker or pool is _marker:
- raise ValueError('element is not a product of args')
-
- pool = tuple(pool)
- index = index * len(pool) + pool.index(x)
-
- return index
-
-
-def combination_index(element, iterable):
- """Equivalent to ``list(combinations(iterable, r)).index(element)``
-
- The subsequences of *iterable* that are of length *r* can be ordered
- lexicographically. :func:`combination_index` computes the index of the
- first *element*, without computing the previous combinations.
-
- >>> combination_index('adf', 'abcdefg')
- 10
-
- ``ValueError`` will be raised if the given *element* isn't one of the
- combinations of *iterable*.
- """
- element = enumerate(element)
- k, y = next(element, (None, None))
- if k is None:
- return 0
-
- indexes = []
- pool = enumerate(iterable)
- for n, x in pool:
- if x == y:
- indexes.append(n)
- tmp, y = next(element, (None, None))
- if tmp is None:
- break
- else:
- k = tmp
- else:
- raise ValueError('element is not a combination of iterable')
-
- n, _ = last(pool, default=(n, None))
-
- # Python versions below 3.8 don't have math.comb
- index = 1
- for i, j in enumerate(reversed(indexes), start=1):
- j = n - j
- if i <= j:
- index += factorial(j) // (factorial(i) * factorial(j - i))
-
- return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index
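
The closing arithmetic is the combinatorial number system: the rank of a combination is computed from binomial coefficients, written out with factorials because `math.comb` is unavailable below Python 3.8 (per the comment above). A round-trip check against lexicographic order (assuming `more_itertools` is installed):

```python
from itertools import combinations

from more_itertools import combination_index

pool = 'abcdefg'
# The computed rank should match the position in lexicographic order.
for expected, combo in enumerate(combinations(pool, 3)):
    assert combination_index(combo, pool) == expected
```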
-
-
-def permutation_index(element, iterable):
- """Equivalent to ``list(permutations(iterable, r)).index(element)```
-
- The subsequences of *iterable* that are of length *r* where order is
- important can be ordered lexicographically. :func:`permutation_index`
- computes the index of the first *element* directly, without computing
- the previous permutations.
-
- >>> permutation_index([1, 3, 2], range(5))
- 19
-
- ``ValueError`` will be raised if the given *element* isn't one of the
- permutations of *iterable*.
- """
- index = 0
- pool = list(iterable)
- for i, x in zip(range(len(pool), -1, -1), element):
- r = pool.index(x)
- index = index * i + r
- del pool[r]
-
- return index
-
-
-class countable:
- """Wrap *iterable* and keep a count of how many items have been consumed.
-
- The ``items_seen`` attribute starts at ``0`` and increments as the iterable
- is consumed:
-
- >>> iterable = map(str, range(10))
- >>> it = countable(iterable)
- >>> it.items_seen
- 0
- >>> next(it), next(it)
- ('0', '1')
- >>> list(it)
- ['2', '3', '4', '5', '6', '7', '8', '9']
- >>> it.items_seen
- 10
- """
-
- def __init__(self, iterable):
- self._it = iter(iterable)
- self.items_seen = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- item = next(self._it)
- self.items_seen += 1
-
- return item
-
-
-def chunked_even(iterable, n):
- """Break *iterable* into lists of approximately length *n*.
- Items are distributed such that the lengths of the lists differ by at
- most 1 item.
-
- >>> iterable = [1, 2, 3, 4, 5, 6, 7]
- >>> n = 3
- >>> list(chunked_even(iterable, n)) # List lengths: 3, 2, 2
- [[1, 2, 3], [4, 5], [6, 7]]
- >>> list(chunked(iterable, n)) # List lengths: 3, 3, 1
- [[1, 2, 3], [4, 5, 6], [7]]
-
- """
-
- len_method = getattr(iterable, '__len__', None)
-
- if len_method is None:
- return _chunked_even_online(iterable, n)
- else:
- return _chunked_even_finite(iterable, len_method(), n)
-
-
-def _chunked_even_online(iterable, n):
- buffer = []
- maxbuf = n + (n - 2) * (n - 1)
- for x in iterable:
- buffer.append(x)
- if len(buffer) == maxbuf:
- yield buffer[:n]
- buffer = buffer[n:]
- yield from _chunked_even_finite(buffer, len(buffer), n)
-
-
-def _chunked_even_finite(iterable, N, n):
- if N < 1:
- return
-
- # Lists are either size `full_size <= n` or `partial_size = full_size - 1`
- q, r = divmod(N, n)
- num_lists = q + (1 if r > 0 else 0)
- q, r = divmod(N, num_lists)
- full_size = q + (1 if r > 0 else 0)
- partial_size = full_size - 1
- num_full = N - partial_size * num_lists
- num_partial = num_lists - num_full
-
- buffer = []
- iterator = iter(iterable)
-
- # Yield num_full lists of full_size
- for x in iterator:
- buffer.append(x)
- if len(buffer) == full_size:
- yield buffer
- buffer = []
- num_full -= 1
- if num_full <= 0:
- break
-
- # Yield num_partial lists of partial_size
- for x in iterator:
- buffer.append(x)
- if len(buffer) == partial_size:
- yield buffer
- buffer = []
- num_partial -= 1
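
Worked example of the size arithmetic above: for N=10 and n=4, `divmod(10, 4)` gives `num_lists = 3`; then `divmod(10, 3)` gives `full_size = 4` and `partial_size = 3`, with `num_full = 10 - 3*3 = 1` full list and two partial ones. A quick check via the public wrapper (assuming `more_itertools` is installed):

```python
from more_itertools import chunked_even

chunks = list(chunked_even(range(10), 4))
# One full chunk of 4, then two partial chunks of 3.
assert [len(c) for c in chunks] == [4, 3, 3]
print(chunks)  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
```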
-
-
-def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False):
- """A version of :func:`zip` that "broadcasts" any scalar
- (i.e., non-iterable) items into output tuples.
-
- >>> iterable_1 = [1, 2, 3]
- >>> iterable_2 = ['a', 'b', 'c']
- >>> scalar = '_'
- >>> list(zip_broadcast(iterable_1, iterable_2, scalar))
- [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')]
-
- The *scalar_types* keyword argument determines what types are considered
- scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to
- treat strings and byte strings as iterable:
-
- >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None))
- [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')]
-
- If the *strict* keyword argument is ``True``, then
- ``UnequalIterablesError`` will be raised if any of the iterables have
- different lengths.
- """
-
- def is_scalar(obj):
- if scalar_types and isinstance(obj, scalar_types):
- return True
- try:
- iter(obj)
- except TypeError:
- return True
- else:
- return False
-
- size = len(objects)
- if not size:
- return
-
- iterables, iterable_positions = [], []
- scalars, scalar_positions = [], []
- for i, obj in enumerate(objects):
- if is_scalar(obj):
- scalars.append(obj)
- scalar_positions.append(i)
- else:
- iterables.append(iter(obj))
- iterable_positions.append(i)
-
- if len(scalars) == size:
- yield tuple(objects)
- return
-
- zipper = _zip_equal if strict else zip
- for item in zipper(*iterables):
- new_item = [None] * size
-
- for i, elem in zip(iterable_positions, item):
- new_item[i] = elem
-
- for i, elem in zip(scalar_positions, scalars):
- new_item[i] = elem
-
- yield tuple(new_item)
-
-
-def unique_in_window(iterable, n, key=None):
- """Yield the items from *iterable* that haven't been seen recently.
- *n* is the size of the lookback window.
-
- >>> iterable = [0, 1, 0, 2, 3, 0]
- >>> n = 3
- >>> list(unique_in_window(iterable, n))
- [0, 1, 2, 3, 0]
-
- The *key* function, if provided, will be used to determine uniqueness:
-
- >>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
- ['a', 'b', 'c', 'd', 'a']
-
- The items in *iterable* must be hashable.
-
- """
- if n <= 0:
- raise ValueError('n must be greater than 0')
-
- window = deque(maxlen=n)
- uniques = set()
- use_key = key is not None
-
- for item in iterable:
- k = key(item) if use_key else item
- if k in uniques:
- continue
-
- if len(uniques) == n:
- uniques.discard(window[0])
-
- uniques.add(k)
- window.append(k)
-
- yield item
-
-
-def duplicates_everseen(iterable, key=None):
- """Yield duplicate elements after their first appearance.
-
- >>> list(duplicates_everseen('mississippi'))
- ['s', 'i', 's', 's', 'i', 'p', 'i']
- >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
- ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']
-
- This function is analogous to :func:`unique_everseen` and is subject to
- the same performance considerations.
-
- """
- seen_set = set()
- seen_list = []
- use_key = key is not None
-
- for element in iterable:
- k = key(element) if use_key else element
- try:
- if k not in seen_set:
- seen_set.add(k)
- else:
- yield element
- except TypeError:
- if k not in seen_list:
- seen_list.append(k)
- else:
- yield element
-
-
-def duplicates_justseen(iterable, key=None):
- """Yields serially-duplicate elements after their first appearance.
-
- >>> list(duplicates_justseen('mississippi'))
- ['s', 's', 'p']
- >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
- ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']
-
- This function is analogous to :func:`unique_justseen`.
-
- """
- return flatten(
- map(
- lambda group_tuple: islice_extended(group_tuple[1])[1:],
- groupby(iterable, key),
- )
- )
-
-
-def minmax(iterable_or_value, *others, key=None, default=_marker):
- """Returns both the smallest and largest items in an iterable
- or the largest of two or more arguments.
-
- >>> minmax([3, 1, 5])
- (1, 5)
-
- >>> minmax(4, 2, 6)
- (2, 6)
-
- If a *key* function is provided, it will be used to transform the input
- items for comparison.
-
- >>> minmax([5, 30], key=str) # '30' sorts before '5'
- (30, 5)
-
- If a *default* value is provided, it will be returned if there are no
- input items.
-
- >>> minmax([], default=(0, 0))
- (0, 0)
-
- Otherwise ``ValueError`` is raised.
-
- This function is based on a recipe by Raymond Hettinger and takes care
- to minimize the number of comparisons performed.
- """
- iterable = (iterable_or_value, *others) if others else iterable_or_value
-
- it = iter(iterable)
-
- try:
- lo = hi = next(it)
- except StopIteration as e:
- if default is _marker:
- raise ValueError(
- '`minmax()` argument is an empty iterable. '
- 'Provide a `default` value to suppress this error.'
- ) from e
- return default
-
- # Different branches depending on the presence of key. This saves a lot
- # of unimportant copies which would significantly slow down the
- # "key=None" branch.
- if key is None:
- for x, y in zip_longest(it, it, fillvalue=lo):
- if y < x:
- x, y = y, x
- if x < lo:
- lo = x
- if hi < y:
- hi = y
-
- else:
- lo_key = hi_key = key(lo)
-
- for x, y in zip_longest(it, it, fillvalue=lo):
-
- x_key, y_key = key(x), key(y)
-
- if y_key < x_key:
- x, y, x_key, y_key = y, x, y_key, x_key
- if x_key < lo_key:
- lo, lo_key = x, x_key
- if hi_key < y_key:
- hi, hi_key = y, y_key
-
- return lo, hi
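
The pairwise loop is the classic three-comparisons-per-two-items trick: each (x, y) pair costs one internal comparison plus one against the running minimum and one against the running maximum, versus four when calling `min` and `max` separately. A counting sketch with a hypothetical wrapper class (assuming `more_itertools` is installed):

```python
from more_itertools import minmax

class Counted:
    """Wrap a value and count how often it participates in a comparison."""
    calls = 0

    def __init__(self, value):
        self.value = value

    def __lt__(self, other):
        Counted.calls += 1
        return self.value < other.value

data = [Counted(v) for v in (7, 2, 9, 4, 1, 8)]
lo, hi = minmax(data)
assert (lo.value, hi.value) == (1, 9)
print(Counted.calls, 'comparisons for', len(data), 'items')  # ~1.5 per item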
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/more_itertools/recipes.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/more_itertools/recipes.py
deleted file mode 100644
index a2596423..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/more_itertools/recipes.py
+++ /dev/null
@@ -1,698 +0,0 @@
-"""Imported from the recipes section of the itertools documentation.
-
-All functions taken from the recipes section of the itertools library docs
-[1]_.
-Some backward-compatible usability improvements have been made.
-
-.. [1] http://docs.python.org/library/itertools.html#recipes
-
-"""
-import warnings
-from collections import deque
-from itertools import (
- chain,
- combinations,
- count,
- cycle,
- groupby,
- islice,
- repeat,
- starmap,
- tee,
- zip_longest,
-)
-import operator
-from random import randrange, sample, choice
-
-__all__ = [
- 'all_equal',
- 'before_and_after',
- 'consume',
- 'convolve',
- 'dotproduct',
- 'first_true',
- 'flatten',
- 'grouper',
- 'iter_except',
- 'ncycles',
- 'nth',
- 'nth_combination',
- 'padnone',
- 'pad_none',
- 'pairwise',
- 'partition',
- 'powerset',
- 'prepend',
- 'quantify',
- 'random_combination_with_replacement',
- 'random_combination',
- 'random_permutation',
- 'random_product',
- 'repeatfunc',
- 'roundrobin',
- 'sliding_window',
- 'tabulate',
- 'tail',
- 'take',
- 'triplewise',
- 'unique_everseen',
- 'unique_justseen',
-]
-
-
-def take(n, iterable):
- """Return first *n* items of the iterable as a list.
-
- >>> take(3, range(10))
- [0, 1, 2]
-
- If there are fewer than *n* items in the iterable, all of them are
- returned.
-
- >>> take(10, range(3))
- [0, 1, 2]
-
- """
- return list(islice(iterable, n))
-
-
-def tabulate(function, start=0):
- """Return an iterator over the results of ``func(start)``,
- ``func(start + 1)``, ``func(start + 2)``...
-
- *func* should be a function that accepts one integer argument.
-
- If *start* is not specified it defaults to 0. It will be incremented each
- time the iterator is advanced.
-
- >>> square = lambda x: x ** 2
- >>> iterator = tabulate(square, -3)
- >>> take(4, iterator)
- [9, 4, 1, 0]
-
- """
- return map(function, count(start))
-
-
-def tail(n, iterable):
- """Return an iterator over the last *n* items of *iterable*.
-
- >>> t = tail(3, 'ABCDEFG')
- >>> list(t)
- ['E', 'F', 'G']
-
- """
- return iter(deque(iterable, maxlen=n))
-
-
-def consume(iterator, n=None):
- """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
- entirely.
-
- Efficiently exhausts an iterator without returning values. Defaults to
- consuming the whole iterator, but an optional second argument may be
- provided to limit consumption.
-
- >>> i = (x for x in range(10))
- >>> next(i)
- 0
- >>> consume(i, 3)
- >>> next(i)
- 4
- >>> consume(i)
- >>> next(i)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- StopIteration
-
- If the iterator has fewer items remaining than the provided limit, the
- whole iterator will be consumed.
-
- >>> i = (x for x in range(3))
- >>> consume(i, 5)
- >>> next(i)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- StopIteration
-
- """
- # Use functions that consume iterators at C speed.
- if n is None:
- # feed the entire iterator into a zero-length deque
- deque(iterator, maxlen=0)
- else:
- # advance to the empty slice starting at position n
- next(islice(iterator, n, n), None)
-
-
-def nth(iterable, n, default=None):
- """Returns the nth item or a default value.
-
- >>> l = range(10)
- >>> nth(l, 3)
- 3
- >>> nth(l, 20, "zebra")
- 'zebra'
-
- """
- return next(islice(iterable, n, None), default)
-
-
-def all_equal(iterable):
- """
- Returns ``True`` if all the elements are equal to each other.
-
- >>> all_equal('aaaa')
- True
- >>> all_equal('aaab')
- False
-
- """
- g = groupby(iterable)
- return next(g, True) and not next(g, False)
-
-
-def quantify(iterable, pred=bool):
- """Return the how many times the predicate is true.
-
- >>> quantify([True, False, True])
- 2
-
- """
- return sum(map(pred, iterable))
-
-
-def pad_none(iterable):
- """Returns the sequence of elements and then returns ``None`` indefinitely.
-
- >>> take(5, pad_none(range(3)))
- [0, 1, 2, None, None]
-
- Useful for emulating the behavior of the built-in :func:`map` function.
-
- See also :func:`padded`.
-
- """
- return chain(iterable, repeat(None))
-
-
-padnone = pad_none
-
-
-def ncycles(iterable, n):
- """Returns the sequence elements *n* times
-
- >>> list(ncycles(["a", "b"], 3))
- ['a', 'b', 'a', 'b', 'a', 'b']
-
- """
- return chain.from_iterable(repeat(tuple(iterable), n))
-
-
-def dotproduct(vec1, vec2):
- """Returns the dot product of the two iterables.
-
- >>> dotproduct([10, 10], [20, 20])
- 400
-
- """
- return sum(map(operator.mul, vec1, vec2))
-
-
-def flatten(listOfLists):
- """Return an iterator flattening one level of nesting in a list of lists.
-
- >>> list(flatten([[0, 1], [2, 3]]))
- [0, 1, 2, 3]
-
- See also :func:`collapse`, which can flatten multiple levels of nesting.
-
- """
- return chain.from_iterable(listOfLists)
-
-
-def repeatfunc(func, times=None, *args):
- """Call *func* with *args* repeatedly, returning an iterable over the
- results.
-
- If *times* is specified, the iterable will terminate after that many
- repetitions:
-
- >>> from operator import add
- >>> times = 4
- >>> args = 3, 5
- >>> list(repeatfunc(add, times, *args))
- [8, 8, 8, 8]
-
- If *times* is ``None`` the iterable will not terminate:
-
- >>> from random import randrange
- >>> times = None
- >>> args = 1, 11
- >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
- [2, 4, 8, 1, 8, 4]
-
- """
- if times is None:
- return starmap(func, repeat(args))
- return starmap(func, repeat(args, times))
-
-
-def _pairwise(iterable):
- """Returns an iterator of paired items, overlapping, from the original
-
- >>> take(4, pairwise(count()))
- [(0, 1), (1, 2), (2, 3), (3, 4)]
-
- On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
-
- """
- a, b = tee(iterable)
- next(b, None)
- yield from zip(a, b)
-
-
-try:
- from itertools import pairwise as itertools_pairwise
-except ImportError:
- pairwise = _pairwise
-else:
-
- def pairwise(iterable):
- yield from itertools_pairwise(iterable)
-
- pairwise.__doc__ = _pairwise.__doc__
-
-
-def grouper(iterable, n, fillvalue=None):
- """Collect data into fixed-length chunks or blocks.
-
- >>> list(grouper('ABCDEFG', 3, 'x'))
- [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
-
- """
- if isinstance(iterable, int):
- warnings.warn(
- "grouper expects iterable as first parameter", DeprecationWarning
- )
- n, iterable = iterable, n
- args = [iter(iterable)] * n
- return zip_longest(fillvalue=fillvalue, *args)
-
-
-def roundrobin(*iterables):
- """Yields an item from each iterable, alternating between them.
-
- >>> list(roundrobin('ABC', 'D', 'EF'))
- ['A', 'D', 'E', 'B', 'F', 'C']
-
- This function produces the same output as :func:`interleave_longest`, but
- may perform better for some inputs (in particular when the number of
- iterables is small).
-
- """
- # Recipe credited to George Sakkis
- pending = len(iterables)
- nexts = cycle(iter(it).__next__ for it in iterables)
- while pending:
- try:
- for next in nexts:
- yield next()
- except StopIteration:
- pending -= 1
- nexts = cycle(islice(nexts, pending))
-
-
-def partition(pred, iterable):
- """
- Returns a 2-tuple of iterables derived from the input iterable.
- The first yields the items that have ``pred(item) == False``.
- The second yields the items that have ``pred(item) == True``.
-
- >>> is_odd = lambda x: x % 2 != 0
- >>> iterable = range(10)
- >>> even_items, odd_items = partition(is_odd, iterable)
- >>> list(even_items), list(odd_items)
- ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
-
- If *pred* is None, :func:`bool` is used.
-
- >>> iterable = [0, 1, False, True, '', ' ']
- >>> false_items, true_items = partition(None, iterable)
- >>> list(false_items), list(true_items)
- ([0, False, ''], [1, True, ' '])
-
- """
- if pred is None:
- pred = bool
-
- evaluations = ((pred(x), x) for x in iterable)
- t1, t2 = tee(evaluations)
- return (
- (x for (cond, x) in t1 if not cond),
- (x for (cond, x) in t2 if cond),
- )
-
-
-def powerset(iterable):
- """Yields all possible subsets of the iterable.
-
- >>> list(powerset([1, 2, 3]))
- [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
-
- :func:`powerset` will operate on iterables that aren't :class:`set`
- instances, so repeated elements in the input will produce repeated elements
- in the output. Use :func:`unique_everseen` on the input to avoid generating
- duplicates:
-
- >>> seq = [1, 1, 0]
- >>> list(powerset(seq))
- [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
- >>> from more_itertools import unique_everseen
- >>> list(powerset(unique_everseen(seq)))
- [(), (1,), (0,), (1, 0)]
-
- """
- s = list(iterable)
- return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
-
-
-def unique_everseen(iterable, key=None):
- """
- Yield unique elements, preserving order.
-
- >>> list(unique_everseen('AAAABBBCCDAABBB'))
- ['A', 'B', 'C', 'D']
- >>> list(unique_everseen('ABBCcAD', str.lower))
- ['A', 'B', 'C', 'D']
-
- Sequences with a mix of hashable and unhashable items can be used.
- The function will be slower (i.e., `O(n^2)`) for unhashable items.
-
- Remember that ``list`` objects are unhashable - you can use the *key*
- parameter to transform the list to a tuple (which is hashable) to
- avoid a slowdown.
-
- >>> iterable = ([1, 2], [2, 3], [1, 2])
- >>> list(unique_everseen(iterable)) # Slow
- [[1, 2], [2, 3]]
- >>> list(unique_everseen(iterable, key=tuple)) # Faster
- [[1, 2], [2, 3]]
-
- Similarly, you may want to convert unhashable ``set`` objects with
- ``key=frozenset``. For ``dict`` objects,
- ``key=lambda x: frozenset(x.items())`` can be used.
-
- """
- seenset = set()
- seenset_add = seenset.add
- seenlist = []
- seenlist_add = seenlist.append
- use_key = key is not None
-
- for element in iterable:
- k = key(element) if use_key else element
- try:
- if k not in seenset:
- seenset_add(k)
- yield element
- except TypeError:
- if k not in seenlist:
- seenlist_add(k)
- yield element
-
-
-def unique_justseen(iterable, key=None):
- """Yields elements in order, ignoring serial duplicates
-
- >>> list(unique_justseen('AAAABBBCCDAABBB'))
- ['A', 'B', 'C', 'D', 'A', 'B']
- >>> list(unique_justseen('ABBCcAD', str.lower))
- ['A', 'B', 'C', 'A', 'D']
-
- """
- return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
-
-
-def iter_except(func, exception, first=None):
- """Yields results from a function repeatedly until an exception is raised.
-
- Converts a call-until-exception interface to an iterator interface.
- Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
- to end the loop.
-
- >>> l = [0, 1, 2]
- >>> list(iter_except(l.pop, IndexError))
- [2, 1, 0]
-
- Multiple exceptions can be specified as a stopping condition:
-
- >>> l = [1, 2, 3, '...', 4, 5, 6]
- >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
- [7, 6, 5]
- >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
- [4, 3, 2]
- >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
- []
-
- """
- try:
- if first is not None:
- yield first()
- while 1:
- yield func()
- except exception:
- pass
-
-
-def first_true(iterable, default=None, pred=None):
- """
- Returns the first true value in the iterable.
-
- If no true value is found, returns *default*
-
- If *pred* is not None, returns the first item for which
- ``pred(item) == True`` .
-
- >>> first_true(range(10))
- 1
- >>> first_true(range(10), pred=lambda x: x > 5)
- 6
- >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
- 'missing'
-
- """
- return next(filter(pred, iterable), default)
-
-
-def random_product(*args, repeat=1):
- """Draw an item at random from each of the input iterables.
-
- >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
- ('c', 3, 'Z')
-
- If *repeat* is provided as a keyword argument, that many items will be
- drawn from each iterable.
-
- >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
- ('a', 2, 'd', 3)
-
- This is equivalent to taking a random selection from
- ``itertools.product(*args, **kwargs)``.
-
- """
- pools = [tuple(pool) for pool in args] * repeat
- return tuple(choice(pool) for pool in pools)
-
-
-def random_permutation(iterable, r=None):
- """Return a random *r* length permutation of the elements in *iterable*.
-
- If *r* is not specified or is ``None``, then *r* defaults to the length of
- *iterable*.
-
- >>> random_permutation(range(5)) # doctest:+SKIP
- (3, 4, 0, 1, 2)
-
- This is equivalent to taking a random selection from
- ``itertools.permutations(iterable, r)``.
-
- """
- pool = tuple(iterable)
- r = len(pool) if r is None else r
- return tuple(sample(pool, r))
-
-
-def random_combination(iterable, r):
- """Return a random *r* length subsequence of the elements in *iterable*.
-
- >>> random_combination(range(5), 3) # doctest:+SKIP
- (2, 3, 4)
-
- This is equivalent to taking a random selection from
- ``itertools.combinations(iterable, r)``.
-
- """
- pool = tuple(iterable)
- n = len(pool)
- indices = sorted(sample(range(n), r))
- return tuple(pool[i] for i in indices)
-
-
-def random_combination_with_replacement(iterable, r):
- """Return a random *r* length subsequence of elements in *iterable*,
- allowing individual elements to be repeated.
-
- >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
- (0, 0, 1, 2, 2)
-
- This is equivalent to taking a random selection from
- ``itertools.combinations_with_replacement(iterable, r)``.
-
- """
- pool = tuple(iterable)
- n = len(pool)
- indices = sorted(randrange(n) for i in range(r))
- return tuple(pool[i] for i in indices)
-
-
-def nth_combination(iterable, r, index):
- """Equivalent to ``list(combinations(iterable, r))[index]``.
-
- The subsequences of *iterable* that are of length *r* can be ordered
- lexicographically. :func:`nth_combination` computes the subsequence at
- sort position *index* directly, without computing the previous
- subsequences.
-
- >>> nth_combination(range(5), 3, 5)
- (0, 3, 4)
-
- ``ValueError`` will be raised if *r* is negative or greater than the length
- of *iterable*.
- ``IndexError`` will be raised if the given *index* is invalid.
- """
- pool = tuple(iterable)
- n = len(pool)
- if (r < 0) or (r > n):
- raise ValueError
-
- c = 1
- k = min(r, n - r)
- for i in range(1, k + 1):
- c = c * (n - k + i) // i
-
- if index < 0:
- index += c
-
- if (index < 0) or (index >= c):
- raise IndexError
-
- result = []
- while r:
- c, n, r = c * r // n, n - 1, r - 1
- while index >= c:
- index -= c
- c, n = c * (n - r) // n, n - 1
- result.append(pool[-1 - n])
-
- return tuple(result)
-
-
-def prepend(value, iterator):
- """Yield *value*, followed by the elements in *iterator*.
-
- >>> value = '0'
- >>> iterator = ['1', '2', '3']
- >>> list(prepend(value, iterator))
- ['0', '1', '2', '3']
-
- To prepend multiple values, see :func:`itertools.chain`
- or :func:`value_chain`.
-
- """
- return chain([value], iterator)
-
-
-def convolve(signal, kernel):
- """Convolve the iterable *signal* with the iterable *kernel*.
-
- >>> signal = (1, 2, 3, 4, 5)
- >>> kernel = [3, 2, 1]
- >>> list(convolve(signal, kernel))
- [3, 8, 14, 20, 26, 14, 5]
-
- Note: the input arguments are not interchangeable, as the *kernel*
- is immediately consumed and stored.
-
- """
- kernel = tuple(kernel)[::-1]
- n = len(kernel)
- window = deque([0], maxlen=n) * n
- for x in chain(signal, repeat(0, n - 1)):
- window.append(x)
- yield sum(map(operator.mul, kernel, window))
-
-
-def before_and_after(predicate, it):
- """A variant of :func:`takewhile` that allows complete access to the
- remainder of the iterator.
-
- >>> it = iter('ABCdEfGhI')
- >>> all_upper, remainder = before_and_after(str.isupper, it)
- >>> ''.join(all_upper)
- 'ABC'
- >>> ''.join(remainder) # takewhile() would lose the 'd'
- 'dEfGhI'
-
- Note that the first iterator must be fully consumed before the second
- iterator can generate valid results.
- """
- it = iter(it)
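- # The first element that fails the predicate is stashed in *transition*
- # so the remainder iterator can replay it before draining *it*.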
- transition = []
-
- def true_iterator():
- for elem in it:
- if predicate(elem):
- yield elem
- else:
- transition.append(elem)
- return
-
- def remainder_iterator():
- yield from transition
- yield from it
-
- return true_iterator(), remainder_iterator()
-
-
-def triplewise(iterable):
- """Return overlapping triplets from *iterable*.
-
- >>> list(triplewise('ABCDE'))
- [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]
-
- """
- for (a, _), (b, c) in pairwise(pairwise(iterable)):
- yield a, b, c
-
-
-def sliding_window(iterable, n):
- """Return a sliding window of width *n* over *iterable*.
-
- >>> list(sliding_window(range(6), 4))
- [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
-
- If *iterable* has fewer than *n* items, then nothing is yielded:
-
- >>> list(sliding_window(range(3), 4))
- []
-
- For a variant with more features, see :func:`windowed`.
- """
- it = iter(iterable)
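- # Prime the window with the first n items; maxlen=n makes append()
- # evict the oldest item automatically on each subsequent step.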
- window = deque(islice(it, n), maxlen=n)
- if len(window) == n:
- yield tuple(window)
- for x in it:
- window.append(x)
- yield tuple(window)
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/packaging/__about__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/packaging/__about__.py
deleted file mode 100644
index 3551bc2d..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/packaging/__about__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-__all__ = [
- "__title__",
- "__summary__",
- "__uri__",
- "__version__",
- "__author__",
- "__email__",
- "__license__",
- "__copyright__",
-]
-
-__title__ = "packaging"
-__summary__ = "Core utilities for Python packages"
-__uri__ = "https://github.com/pypa/packaging"
-
-__version__ = "21.3"
-
-__author__ = "Donald Stufft and individual contributors"
-__email__ = "donald@stufft.io"
-
-__license__ = "BSD-2-Clause or Apache-2.0"
-__copyright__ = "2014-2019 %s" % __author__
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/packaging/__init__.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/packaging/__init__.py
deleted file mode 100644
index 3c50c5dc..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/packaging/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-from .__about__ import (
- __author__,
- __copyright__,
- __email__,
- __license__,
- __summary__,
- __title__,
- __uri__,
- __version__,
-)
-
-__all__ = [
- "__title__",
- "__summary__",
- "__uri__",
- "__version__",
- "__author__",
- "__email__",
- "__license__",
- "__copyright__",
-]
diff --git a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/packaging/_manylinux.py b/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/packaging/_manylinux.py
deleted file mode 100644
index 4c379aa6..00000000
--- a/Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/pkg_resources/_vendor/packaging/_manylinux.py
+++ /dev/null
@@ -1,301 +0,0 @@
-import collections
-import functools
-import os
-import re
-import struct
-import sys
-import warnings
-from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
-
-
-# Python does not provide platform information at sufficient granularity to
-# identify the architecture of the running executable in some cases, so we
-# determine it dynamically by reading the information from the running
-# process. This only applies on Linux, which uses the ELF format.
-class _ELFFileHeader:
- # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
- class _InvalidELFFileHeader(ValueError):
- """
- An invalid ELF file header was found.
- """
-
- ELF_MAGIC_NUMBER = 0x7F454C46
- ELFCLASS32 = 1
- ELFCLASS64 = 2
- ELFDATA2LSB = 1
- ELFDATA2MSB = 2
- EM_386 = 3
- EM_S390 = 22
- EM_ARM = 40
- EM_X86_64 = 62
- EF_ARM_ABIMASK = 0xFF000000
- EF_ARM_ABI_VER5 = 0x05000000
- EF_ARM_ABI_FLOAT_HARD = 0x00000400
-
- def __init__(self, file: IO[bytes]) -> None:
- def unpack(fmt: str) -> int:
- try:
- data = file.read(struct.calcsize(fmt))
- result: Tuple[int, ...] = struct.unpack(fmt, data)
- except struct.error:
- raise _ELFFileHeader._InvalidELFFileHeader()
- return result[0]
-
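- # The four magic bytes b'\x7fELF', read big-endian, equal 0x7F454C46.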
- self.e_ident_magic = unpack(">I")
- if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_class = unpack("B")
- if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_data = unpack("B")
- if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_version = unpack("B")
- self.e_ident_osabi = unpack("B")
- self.e_ident_abiversion = unpack("B")
- self.e_ident_pad = file.read(7)
- format_h = "H"
- format_i = "I"
- format_q = "